net/mlx5: share Direct Rules/Verbs flow related structures
[dpdk.git] / drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

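/*
 * Flow attributes deduced from the pattern items, used to select the
 * proper L3/L4 variant of a modify-header action.
 */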
union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        attr->ipv4 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        attr->ipv6 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

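/*
 * Each 48-bit MAC address is modified as a 32-bit high part and
 * a 16-bit low part, hence two entries per address.
 */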
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

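/* Each 128-bit IPv6 address is modified as four 32-bit segments. */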
struct field_modify_info modify_ipv6[] = {
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {0, 0, 0},
};

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to the shared DV context. The lock occurs only if the context is
 * actually shared, i.e. we have a multiport IB device and representors
 * are created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

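/**
 * Release the synchronizing object acquired by flow_d_shared_lock().
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */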
static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

/**
 * Convert modify-header action to DV specification.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type,
                              struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        const uint8_t *spec = item->spec;
        const uint8_t *mask = item->mask;
        uint32_t set;

        while (field->size) {
                set = 0;
                /* Generate modify command for each mask segment. */
                memcpy(&set, &mask[field->offset], field->size);
                if (set) {
                        if (i >= MLX5_MODIFY_NUM)
                                return rte_flow_error_set(error, EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                         "too many items to modify");
                        actions[i].action_type = type;
                        actions[i].field = field->id;
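                        /* A full 32-bit field is encoded as length 0,
                         * shorter fields use their size in bits. */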
                        actions[i].length =
                                field->size == 4 ? 0 : field->size * 8;
                        rte_memcpy(&actions[i].data[4 - field->size],
                                   &spec[field->offset], field->size);
                        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                        ++i;
                }
                if (resource->actions_num != i)
                        resource->actions_num = i;
                field++;
        }
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

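        /* Deduce the transport protocol (UDP or TCP) from the pattern items. */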
        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        }
        if (attr->tcp) {
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

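        /* Deduce the IP version (IPv4 or IPv6) from the pattern items. */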
        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

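        /* Deduce the IP version (IPv4 or IPv6) from the pattern items. */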
        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_meta *spec = item->spec;
        const struct rte_flow_item_meta *mask = item->mask;
        const struct rte_flow_item_meta nic_mask = {
                .data = RTE_BE32(UINT32_MAX)
        };
        int ret;
        uint64_t offloads = dev->data->dev_conf.txmode.offloads;

        if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
                return rte_flow_error_set(error, EPERM,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on metadata offload "
                                          "configuration is off for this port");
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (!spec->data)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          NULL,
                                          "data cannot be zero");
        if (!mask)
                mask = &rte_flow_item_meta_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        error);
        if (ret < 0)
                return ret;
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "pattern not supported for ingress");
        return 0;
}

/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (!priv->config.devx)
                goto notsup_err;
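/* Fall through to the error when DevX counters are not compiled in. */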
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
        return 0;
#endif
notsup_err:
        return rte_flow_error_set
                      (error, ENOTSUP,
                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                       NULL,
                       "count action not supported");
}

/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "decap action not supported for "
                                          "egress");
        return 0;
}

/**
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap(uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap"
                                          " action in a flow");
        /* encap without preceding decap is not supported for ingress */
        if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_decap(uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have encap action before"
                                          " decap action");
        if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single decap"
                                          " action in a flow");
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        /* decap action is valid on egress only if it is followed by encap */
        if (attr->egress) {
                for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
                       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
                       action++) {
                }
                if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
                        return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                         NULL, "decap action not supported"
                                         " for egress");
        }
        return 0;
}

/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_encap_decap_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_encap_decap_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;
        struct rte_flow *flow = dev_flow->flow;
        struct mlx5dv_dr_ns *ns;

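        /* Flows on group 0 use the root table and need the root-level flag. */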
        resource->flags = flow->group ? 0 : 1;
        if (flow->ingress)
                ns = sh->rx_ns;
        else
                ns = sh->tx_ns;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
                if (resource->reformat_type == cache_resource->reformat_type &&
                    resource->ft_type == cache_resource->ft_type &&
                    resource->flags == cache_resource->flags &&
                    resource->size == cache_resource->size &&
                    !memcmp((const void *)resource->buf,
                            (const void *)cache_resource->buf,
                            resource->size)) {
                        DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.encap_decap = cache_resource;
                        return 0;
                }
        }
        /* Register new encap/decap resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->verbs_action =
                mlx5_glue->dv_create_flow_action_packet_reformat
                        (sh->ctx, cache_resource->reformat_type,
                         cache_resource->ft_type, ns, cache_resource->flags,
                         cache_resource->size,
                         (cache_resource->size ? cache_resource->buf : NULL));
        if (!cache_resource->verbs_action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
        dev_flow->dv.encap_decap = cache_resource;
        DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}

/**
 * Find existing table jump resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to jump table resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_jump_tbl_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_jump_tbl_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_jump_tbl_resource *cache_resource;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
                if (resource->tbl == cache_resource->tbl) {
                        DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.jump = cache_resource;
                        return 0;
                }
        }
        /* Register new jump table resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->action =
                mlx5_glue->dr_create_flow_action_dest_flow_tbl
                (resource->tbl->obj);
        if (!cache_resource->action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
        dev_flow->dv.jump = cache_resource;
        DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}

/**
 * Get the size of a specific rte_flow_item_type.
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   Size of the item structure in bytes, 0 if void or irrelevant.
 */
static size_t
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
{
        size_t retval;

        switch (item_type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                retval = sizeof(struct rte_flow_item_eth);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                retval = sizeof(struct rte_flow_item_vlan);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                retval = sizeof(struct rte_flow_item_ipv4);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                retval = sizeof(struct rte_flow_item_ipv6);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                retval = sizeof(struct rte_flow_item_udp);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                retval = sizeof(struct rte_flow_item_tcp);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
                retval = sizeof(struct rte_flow_item_vxlan);
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
                retval = sizeof(struct rte_flow_item_gre);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                retval = sizeof(struct rte_flow_item_nvgre);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                retval = sizeof(struct rte_flow_item_vxlan_gpe);
                break;
        case RTE_FLOW_ITEM_TYPE_MPLS:
                retval = sizeof(struct rte_flow_item_mpls);
                break;
        case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
        default:
                retval = 0;
                break;
        }
        return retval;
}

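/* Default field values used when building encapsulation headers. */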
#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04

/**
 * Convert the encap action data from a list of rte_flow_item objects to
 * a raw buffer.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                           size_t *size, struct rte_flow_error *error)
{
        struct ether_hdr *eth = NULL;
        struct vlan_hdr *vlan = NULL;
        struct ipv4_hdr *ipv4 = NULL;
        struct ipv6_hdr *ipv6 = NULL;
        struct udp_hdr *udp = NULL;
        struct vxlan_hdr *vxlan = NULL;
        struct vxlan_gpe_hdr *vxlan_gpe = NULL;
        struct gre_hdr *gre = NULL;
        size_t len;
        size_t temp_size = 0;

        if (!items)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "invalid empty data");
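        /* Copy each header and patch unset protocol fields in preceding
         * headers. */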
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                len = flow_dv_get_item_len(items->type);
                if (len + temp_size > MLX5_ENCAP_MAX_LEN)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "items total size is too big"
                                                  " for encap action");
                rte_memcpy((void *)&buf[temp_size], items->spec, len);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth = (struct ether_hdr *)&buf[temp_size];
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan = (struct vlan_hdr *)&buf[temp_size];
                        if (!eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "eth header not found");
                        if (!eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4 = (struct ipv4_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
                        if (!ipv4->version_ihl)
                                ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
                                                    MLX5_ENCAP_IPV4_IHL_MIN;
                        if (!ipv4->time_to_live)
                                ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6 = (struct ipv6_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
                        if (!ipv6->vtc_flow)
                                ipv6->vtc_flow =
                                        RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
                        if (!ipv6->hop_limits)
                                ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp = (struct udp_hdr *)&buf[temp_size];
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_UDP;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan = (struct vxlan_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        if (!udp->dst_port)
                                udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
                        if (!vxlan->vx_flags)
                                vxlan->vx_flags =
                                        RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        if (!vxlan_gpe->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!udp->dst_port)
                                udp->dst_port =
                                        RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
                        if (!vxlan_gpe->vx_flags)
                                vxlan_gpe->vx_flags =
                                                MLX5_ENCAP_VXLAN_GPE_FLAGS;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        gre = (struct gre_hdr *)&buf[temp_size];
                        if (!gre->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_GRE;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "unsupported item type");
                        break;
                }
                temp_size += len;
        }
        *size = temp_size;
        return 0;
}

/**
 * Convert L2 encap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
                               const struct rte_flow_action *action,
                               struct mlx5_flow *dev_flow,
                               struct rte_flow_error *error)
{
        const struct rte_flow_item *encap_data;
        const struct rte_flow_action_raw_encap *raw_encap_data;
        struct mlx5_flow_dv_encap_decap_resource res = {
                .reformat_type =
                        MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
                .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
        };

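        /*
         * RAW_ENCAP carries the header data verbatim, while VXLAN/NVGRE
         * encap definitions are first converted from pattern items.
         */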
        if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
                raw_encap_data =
                        (const struct rte_flow_action_raw_encap *)action->conf;
                res.size = raw_encap_data->size;
                memcpy(res.buf, raw_encap_data->data, res.size);
        } else {
                if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
                        encap_data =
                                ((const struct rte_flow_action_vxlan_encap *)
                                                action->conf)->definition;
                else
                        encap_data =
                                ((const struct rte_flow_action_nvgre_encap *)
                                                action->conf)->definition;
                if (flow_dv_convert_encap_data(encap_data, res.buf,
                                               &res.size, error))
                        return -rte_errno;
        }
        if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "can't create L2 encap action");
        return 0;
}

/**
 * Convert L2 decap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
                               struct mlx5_flow *dev_flow,
                               struct rte_flow_error *error)
{
        struct mlx5_flow_dv_encap_decap_resource res = {
                .size = 0,
                .reformat_type =
                        MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
                .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
        };

        if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "can't create L2 decap action");
        return 0;
}

/**
 * Convert raw decap/encap (L3 tunnel) action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct mlx5_flow *dev_flow,
                                const struct rte_flow_attr *attr,
                                struct rte_flow_error *error)
{
        const struct rte_flow_action_raw_encap *encap_data;
        struct mlx5_flow_dv_encap_decap_resource res;

        encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
        res.size = encap_data->size;
        memcpy(res.buf, encap_data->data, res.size);
1320         res.reformat_type = attr->egress ?
1321                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1322                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1323         res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1324                                      MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1325         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1326                 return rte_flow_error_set(error, EINVAL,
1327                                           RTE_FLOW_ERROR_TYPE_ACTION,
1328                                           NULL, "can't create encap action");
1329         return 0;
1330 }
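
/*
 * Illustrative sketch (not part of the driver): a RAW_ENCAP action whose
 * buffer carries a pre-built IPv4+GRE header with no inner Ethernet, i.e.
 * an L3 tunnel. On an egress flow the code above programs L2_TO_L3_TUNNEL;
 * the same action on an ingress flow selects L3_TUNNEL_TO_L2 (decap). The
 * header bytes below are placeholders.
 */
static __rte_unused const struct rte_flow_action *
example_raw_l3_encap_action(void)
{
	/* 20B IPv4 header followed by a 4B GRE header, values arbitrary. */
	static uint8_t tunnel_hdr[] = {
		0x45, 0x00, 0x00, 0x00, /* version/IHL, TOS, total length */
		0x00, 0x00, 0x00, 0x00, /* id, flags/fragment offset */
		0x40, 0x2f, 0x00, 0x00, /* TTL 64, protocol 47 (GRE), csum */
		0x0a, 0x00, 0x00, 0x01, /* src 10.0.0.1 */
		0x0a, 0x00, 0x00, 0x02, /* dst 10.0.0.2 */
		0x00, 0x00, 0x08, 0x00, /* GRE flags/version, proto 0x0800 */
	};
	static const struct rte_flow_action_raw_encap conf = {
		.data = tunnel_hdr,
		.size = sizeof(tunnel_hdr),
	};
	static const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
		.conf = &conf,
	};

	return &action;
}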
1331
1332 /**
1333  * Validate the modify-header actions.
1334  *
1335  * @param[in] action_flags
1336  *   Holds the actions detected until now.
1337  * @param[in] action
1338  *   Pointer to the modify action.
1339  * @param[out] error
1340  *   Pointer to error structure.
1341  *
1342  * @return
1343  *   0 on success, a negative errno value otherwise and rte_errno is set.
1344  */
1345 static int
1346 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1347                                    const struct rte_flow_action *action,
1348                                    struct rte_flow_error *error)
1349 {
1350         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1351                 return rte_flow_error_set(error, EINVAL,
1352                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1353                                           NULL, "action configuration not set");
1354         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1355                 return rte_flow_error_set(error, EINVAL,
1356                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1357                                           "can't have encap action before"
1358                                           " modify action");
1359         return 0;
1360 }
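
/*
 * Illustrative sketch (not part of the driver): the ordering rule enforced
 * above. The first action list is accepted (modify-header comes before
 * encap); the second fails with "can't have encap action before modify
 * action". The .conf pointers are elided, so these arrays are meant for
 * reading, not for passing to rte_flow_validate() as-is.
 */
static __rte_unused const struct rte_flow_action example_modify_then_encap[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC }, /* needs .conf */
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP },  /* needs .conf */
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },        /* needs .conf */
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
static __rte_unused const struct rte_flow_action example_encap_then_modify[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP },  /* sets an encap flag */
	{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC }, /* rejected above */
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};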
1361
1362 /**
1363  * Validate the modify-header MAC address actions.
1364  *
1365  * @param[in] action_flags
1366  *   Holds the actions detected until now.
1367  * @param[in] action
1368  *   Pointer to the modify action.
1369  * @param[in] item_flags
1370  *   Holds the items detected.
1371  * @param[out] error
1372  *   Pointer to error structure.
1373  *
1374  * @return
1375  *   0 on success, a negative errno value otherwise and rte_errno is set.
1376  */
1377 static int
1378 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1379                                    const struct rte_flow_action *action,
1380                                    const uint64_t item_flags,
1381                                    struct rte_flow_error *error)
1382 {
1383         int ret = 0;
1384
1385         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1386         if (!ret) {
1387                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1388                         return rte_flow_error_set(error, EINVAL,
1389                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1390                                                   NULL,
1391                                                   "no L2 item in pattern");
1392         }
1393         return ret;
1394 }
1395
1396 /**
1397  * Validate the modify-header IPv4 address actions.
1398  *
1399  * @param[in] action_flags
1400  *   Holds the actions detected until now.
1401  * @param[in] action
1402  *   Pointer to the modify action.
1403  * @param[in] item_flags
1404  *   Holds the items detected.
1405  * @param[out] error
1406  *   Pointer to error structure.
1407  *
1408  * @return
1409  *   0 on success, a negative errno value otherwise and rte_errno is set.
1410  */
1411 static int
1412 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1413                                     const struct rte_flow_action *action,
1414                                     const uint64_t item_flags,
1415                                     struct rte_flow_error *error)
1416 {
1417         int ret = 0;
1418
1419         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1420         if (!ret) {
1421                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1422                         return rte_flow_error_set(error, EINVAL,
1423                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1424                                                   NULL,
1425                                                   "no ipv4 item in pattern");
1426         }
1427         return ret;
1428 }
1429
1430 /**
1431  * Validate the modify-header IPv6 address actions.
1432  *
1433  * @param[in] action_flags
1434  *   Holds the actions detected until now.
1435  * @param[in] action
1436  *   Pointer to the modify action.
1437  * @param[in] item_flags
1438  *   Holds the items detected.
1439  * @param[out] error
1440  *   Pointer to error structure.
1441  *
1442  * @return
1443  *   0 on success, a negative errno value otherwise and rte_errno is set.
1444  */
1445 static int
1446 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1447                                     const struct rte_flow_action *action,
1448                                     const uint64_t item_flags,
1449                                     struct rte_flow_error *error)
1450 {
1451         int ret = 0;
1452
1453         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1454         if (!ret) {
1455                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1456                         return rte_flow_error_set(error, EINVAL,
1457                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1458                                                   NULL,
1459                                                   "no ipv6 item in pattern");
1460         }
1461         return ret;
1462 }
1463
1464 /**
1465  * Validate the modify-header TP actions.
1466  *
1467  * @param[in] action_flags
1468  *   Holds the actions detected until now.
1469  * @param[in] action
1470  *   Pointer to the modify action.
1471  * @param[in] item_flags
1472  *   Holds the items detected.
1473  * @param[out] error
1474  *   Pointer to error structure.
1475  *
1476  * @return
1477  *   0 on success, a negative errno value otherwise and rte_errno is set.
1478  */
1479 static int
1480 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1481                                   const struct rte_flow_action *action,
1482                                   const uint64_t item_flags,
1483                                   struct rte_flow_error *error)
1484 {
1485         int ret = 0;
1486
1487         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1488         if (!ret) {
1489                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1490                         return rte_flow_error_set(error, EINVAL,
1491                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1492                                                   NULL, "no transport layer "
1493                                                   "in pattern");
1494         }
1495         return ret;
1496 }
1497
1498 /**
1499  * Validate the modify-header TTL actions.
1500  *
1501  * @param[in] action_flags
1502  *   Holds the actions detected until now.
1503  * @param[in] action
1504  *   Pointer to the modify action.
1505  * @param[in] item_flags
1506  *   Holds the items detected.
1507  * @param[out] error
1508  *   Pointer to error structure.
1509  *
1510  * @return
1511  *   0 on success, a negative errno value otherwise and rte_errno is set.
1512  */
1513 static int
1514 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1515                                    const struct rte_flow_action *action,
1516                                    const uint64_t item_flags,
1517                                    struct rte_flow_error *error)
1518 {
1519         int ret = 0;
1520
1521         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1522         if (!ret) {
1523                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1524                         return rte_flow_error_set(error, EINVAL,
1525                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1526                                                   NULL,
1527                                                   "no IP protocol in pattern");
1528         }
1529         return ret;
1530 }
1531
1532 /**
1533  * Validate jump action.
1534  *
1535  * @param[in] action
1536  *   Pointer to the modify action.
1537  * @param[in] group
1538  *   The group of the current flow.
1539  * @param[out] error
1540  *   Pointer to error structure.
1541  *
1542  * @return
1543  *   0 on success, a negative errno value otherwise and rte_errno is set.
1544  */
1545 static int
1546 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1547                              uint32_t group,
1548                              struct rte_flow_error *error)
1549 {
1550         if (!action->conf)
1551                 return rte_flow_error_set(error, EINVAL,
1552                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1553                                           NULL, "action configuration not set");
1554         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1555                 return rte_flow_error_set(error, EINVAL,
1556                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1557                                           "target group must be higher than"
1558                                           " the current flow group");
1559         return 0;
1560 }
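
/*
 * Illustrative sketch (not part of the driver): a jump from group 0 to
 * group 1 passes the check above; jumping to the same or a lower group is
 * rejected.
 */
static const struct rte_flow_action_jump example_jump_conf __rte_unused = {
	.group = 1, /* valid target for a flow created in group 0 */
};
static const struct rte_flow_action example_jump_action __rte_unused = {
	.type = RTE_FLOW_ACTION_TYPE_JUMP,
	.conf = &example_jump_conf,
};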
1561
1562
1563 /**
1564  * Find existing modify-header resource or create and register a new one.
1565  *
1566  * @param[in, out] dev
1567  *   Pointer to rte_eth_dev structure.
1568  * @param[in, out] resource
1569  *   Pointer to modify-header resource.
1570  * @param[in, out] dev_flow
1571  *   Pointer to the dev_flow.
1572  * @param[out] error
1573  *   Pointer to error structure.
1574  *
1575  * @return
1576  *   0 on success, a negative errno value otherwise and rte_errno is set.
1577  */
1578 static int
1579 flow_dv_modify_hdr_resource_register
1580                         (struct rte_eth_dev *dev,
1581                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1582                          struct mlx5_flow *dev_flow,
1583                          struct rte_flow_error *error)
1584 {
1585         struct mlx5_priv *priv = dev->data->dev_private;
1586         struct mlx5_ibv_shared *sh = priv->sh;
1587         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
1588
1589         struct mlx5dv_dr_ns *ns =
1590                 resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX  ?
1591                 sh->tx_ns : sh->rx_ns;
1592
1593         /* Lookup a matching resource from cache. */
1594         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
1595                 if (resource->ft_type == cache_resource->ft_type &&
1596                     resource->actions_num == cache_resource->actions_num &&
1597                     !memcmp((const void *)resource->actions,
1598                             (const void *)cache_resource->actions,
1599                             (resource->actions_num *
1600                                             sizeof(resource->actions[0])))) {
1601                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1602                                 (void *)cache_resource,
1603                                 rte_atomic32_read(&cache_resource->refcnt));
1604                         rte_atomic32_inc(&cache_resource->refcnt);
1605                         dev_flow->dv.modify_hdr = cache_resource;
1606                         return 0;
1607                 }
1608         }
1609         /* Register new modify-header resource. */
1610         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1611         if (!cache_resource)
1612                 return rte_flow_error_set(error, ENOMEM,
1613                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1614                                           "cannot allocate resource memory");
1615         *cache_resource = *resource;
1616         cache_resource->verbs_action =
1617                 mlx5_glue->dv_create_flow_action_modify_header
1618                                         (sh->ctx, cache_resource->ft_type,
1619                                          ns, 0,
1620                                          cache_resource->actions_num *
1621                                          sizeof(cache_resource->actions[0]),
1622                                          (uint64_t *)cache_resource->actions);
1623         if (!cache_resource->verbs_action) {
1624                 rte_free(cache_resource);
1625                 return rte_flow_error_set(error, ENOMEM,
1626                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1627                                           NULL, "cannot create action");
1628         }
1629         rte_atomic32_init(&cache_resource->refcnt);
1630         rte_atomic32_inc(&cache_resource->refcnt);
1631         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
1632         dev_flow->dv.modify_hdr = cache_resource;
1633         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1634                 (void *)cache_resource,
1635                 rte_atomic32_read(&cache_resource->refcnt));
1636         return 0;
1637 }
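
/*
 * Note on the cache above: entries are keyed on the (ft_type, actions_num,
 * raw modification commands) triple, compared with memcmp(), so two flows
 * requesting byte-identical modify-header programs share one verbs action
 * and only bump the reference count. The matching release path drops the
 * reference and destroys the action once the count reaches zero.
 */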
1638
1639 /**
1640  * Get or create a flow counter.
1641  *
1642  * @param[in] dev
1643  *   Pointer to the Ethernet device structure.
1644  * @param[in] shared
1645  *   Indicate if this counter is shared with other flows.
1646  * @param[in] id
1647  *   Counter identifier.
1648  *
1649  * @return
1650  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
1651  */
1652 static struct mlx5_flow_counter *
1653 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1654 {
1655         struct mlx5_priv *priv = dev->data->dev_private;
1656         struct mlx5_flow_counter *cnt = NULL;
1657         struct mlx5_devx_counter_set *dcs = NULL;
1658         int ret;
1659
1660         if (!priv->config.devx) {
1661                 ret = -ENOTSUP;
1662                 goto error_exit;
1663         }
1664         if (shared) {
1665                 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1666                         if (cnt->shared && cnt->id == id) {
1667                                 cnt->ref_cnt++;
1668                                 return cnt;
1669                         }
1670                 }
1671         }
1672         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1673         dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1674         if (!dcs || !cnt) {
1675                 ret = -ENOMEM;
1676                 goto error_exit;
1677         }
1678         ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1679         if (ret)
1680                 goto error_exit;
1681         struct mlx5_flow_counter tmpl = {
1682                 .shared = shared,
1683                 .ref_cnt = 1,
1684                 .id = id,
1685                 .dcs = dcs,
1686         };
1687         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1688         if (!tmpl.action) {
1689                 ret = -errno; /* negate: error_exit sets rte_errno = -ret */
1690                 goto error_exit;
1691         }
1692         *cnt = tmpl;
1693         LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1694         return cnt;
1695 error_exit:
1696         rte_free(cnt);
1697         rte_free(dcs);
1698         rte_errno = -ret;
1699         return NULL;
1700 }
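
/*
 * Illustrative sketch (not part of the driver): a shared COUNT action. Two
 * flows created with the same shared id land on one DevX counter set; the
 * LIST_FOREACH lookup above returns the existing entry and increments
 * ref_cnt instead of allocating a second counter. The id is an arbitrary
 * application-chosen value.
 */
static const struct rte_flow_action_count example_shared_count __rte_unused = {
	.shared = 1,
	.id = 7,
};
static const struct rte_flow_action example_count_action __rte_unused = {
	.type = RTE_FLOW_ACTION_TYPE_COUNT,
	.conf = &example_shared_count,
};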
1701
1702 /**
1703  * Release a flow counter.
1704  *
1705  * @param[in] counter
1706  *   Pointer to the counter handler.
1707  */
1708 static void
1709 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1710 {
1711         int ret;
1712
1713         if (!counter)
1714                 return;
1715         if (--counter->ref_cnt == 0) {
1716                 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1717                 if (ret)
1718                         DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1719                 LIST_REMOVE(counter, next);
1720                 rte_free(counter->dcs);
1721                 rte_free(counter);
1722         }
1723 }
1724
1725 /**
1726  * Verify the @p attributes will be correctly understood by the NIC and store
1727  * them in the @p flow if everything is correct.
1728  *
1729  * @param[in] dev
1730  *   Pointer to dev struct.
1731  * @param[in] attributes
1732  *   Pointer to flow attributes
1733  * @param[out] error
1734  *   Pointer to error structure.
1735  *
1736  * @return
1737  *   0 on success, a negative errno value otherwise and rte_errno is set.
1738  */
1739 static int
1740 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1741                             const struct rte_flow_attr *attributes,
1742                             struct rte_flow_error *error)
1743 {
1744         struct mlx5_priv *priv = dev->data->dev_private;
1745         uint32_t priority_max = priv->config.flow_prio - 1;
1746
1747 #ifndef HAVE_MLX5DV_DR
1748         if (attributes->group)
1749                 return rte_flow_error_set(error, ENOTSUP,
1750                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1751                                           NULL,
1752                                           "groups are not supported");
1753 #endif
1754         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1755             attributes->priority >= priority_max)
1756                 return rte_flow_error_set(error, ENOTSUP,
1757                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1758                                           NULL,
1759                                           "priority out of range");
1760         if (attributes->transfer)
1761                 return rte_flow_error_set(error, ENOTSUP,
1762                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1763                                           NULL,
1764                                           "transfer is not supported");
1765         if (!(attributes->egress ^ attributes->ingress))
1766                 return rte_flow_error_set(error, ENOTSUP,
1767                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1768                                           "must specify exactly one of "
1769                                           "ingress or egress");
1770         return 0;
1771 }
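
/*
 * Illustrative sketch (not part of the driver): attributes accepted by the
 * checks above -- exactly one direction, no transfer, priority within the
 * configured range. Group 0 always passes; non-zero groups additionally
 * require a build with HAVE_MLX5DV_DR.
 */
static const struct rte_flow_attr example_dv_attr __rte_unused = {
	.group = 0,
	.priority = 0,
	.ingress = 1,
};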
1772
1773 /**
1774  * Internal validation function. For validating both actions and items.
1775  *
1776  * @param[in] dev
1777  *   Pointer to the rte_eth_dev structure.
1778  * @param[in] attr
1779  *   Pointer to the flow attributes.
1780  * @param[in] items
1781  *   Pointer to the list of items.
1782  * @param[in] actions
1783  *   Pointer to the list of actions.
1784  * @param[out] error
1785  *   Pointer to the error structure.
1786  *
1787  * @return
1788  *   0 on success, a negative errno value otherwise and rte_errno is set.
1789  */
1790 static int
1791 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1792                  const struct rte_flow_item items[],
1793                  const struct rte_flow_action actions[],
1794                  struct rte_flow_error *error)
1795 {
1796         int ret;
1797         uint64_t action_flags = 0;
1798         uint64_t item_flags = 0;
1799         uint64_t last_item = 0;
1800         int tunnel = 0;
1801         uint8_t next_protocol = 0xff;
1802         int actions_n = 0;
1803
1804         if (items == NULL)
1805                 return -1;
1806         ret = flow_dv_validate_attributes(dev, attr, error);
1807         if (ret < 0)
1808                 return ret;
1809         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1810                 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1811                 switch (items->type) {
1812                 case RTE_FLOW_ITEM_TYPE_VOID:
1813                         break;
1814                 case RTE_FLOW_ITEM_TYPE_ETH:
1815                         ret = mlx5_flow_validate_item_eth(items, item_flags,
1816                                                           error);
1817                         if (ret < 0)
1818                                 return ret;
1819                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1820                                              MLX5_FLOW_LAYER_OUTER_L2;
1821                         break;
1822                 case RTE_FLOW_ITEM_TYPE_VLAN:
1823                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
1824                                                            error);
1825                         if (ret < 0)
1826                                 return ret;
1827                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1828                                              MLX5_FLOW_LAYER_OUTER_VLAN;
1829                         break;
1830                 case RTE_FLOW_ITEM_TYPE_IPV4:
1831                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1832                                                            NULL, error);
1833                         if (ret < 0)
1834                                 return ret;
1835                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1836                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1837                         if (items->mask != NULL &&
1838                             ((const struct rte_flow_item_ipv4 *)
1839                              items->mask)->hdr.next_proto_id) {
1840                                 next_protocol =
1841                                         ((const struct rte_flow_item_ipv4 *)
1842                                          (items->spec))->hdr.next_proto_id;
1843                                 next_protocol &=
1844                                         ((const struct rte_flow_item_ipv4 *)
1845                                          (items->mask))->hdr.next_proto_id;
1846                         } else {
1847                                 /* Reset for inner layer. */
1848                                 next_protocol = 0xff;
1849                         }
1850                         break;
1851                 case RTE_FLOW_ITEM_TYPE_IPV6:
1852                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1853                                                            NULL, error);
1854                         if (ret < 0)
1855                                 return ret;
1856                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1857                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1858                         if (items->mask != NULL &&
1859                             ((const struct rte_flow_item_ipv6 *)
1860                              items->mask)->hdr.proto) {
1861                                 next_protocol =
1862                                         ((const struct rte_flow_item_ipv6 *)
1863                                          items->spec)->hdr.proto;
1864                                 next_protocol &=
1865                                         ((const struct rte_flow_item_ipv6 *)
1866                                          items->mask)->hdr.proto;
1867                         } else {
1868                                 /* Reset for inner layer. */
1869                                 next_protocol = 0xff;
1870                         }
1871                         break;
1872                 case RTE_FLOW_ITEM_TYPE_TCP:
1873                         ret = mlx5_flow_validate_item_tcp
1874                                                 (items, item_flags,
1875                                                  next_protocol,
1876                                                  &rte_flow_item_tcp_mask,
1877                                                  error);
1878                         if (ret < 0)
1879                                 return ret;
1880                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1881                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
1882                         break;
1883                 case RTE_FLOW_ITEM_TYPE_UDP:
1884                         ret = mlx5_flow_validate_item_udp(items, item_flags,
1885                                                           next_protocol,
1886                                                           error);
1887                         if (ret < 0)
1888                                 return ret;
1889                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1890                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
1891                         break;
1892                 case RTE_FLOW_ITEM_TYPE_GRE:
1893                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1894                         ret = mlx5_flow_validate_item_gre(items, item_flags,
1895                                                           next_protocol, error);
1896                         if (ret < 0)
1897                                 return ret;
1898                         last_item = MLX5_FLOW_LAYER_GRE;
1899                         break;
1900                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1901                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1902                                                             error);
1903                         if (ret < 0)
1904                                 return ret;
1905                         last_item = MLX5_FLOW_LAYER_VXLAN;
1906                         break;
1907                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1908                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
1909                                                                 item_flags, dev,
1910                                                                 error);
1911                         if (ret < 0)
1912                                 return ret;
1913                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1914                         break;
1915                 case RTE_FLOW_ITEM_TYPE_MPLS:
1916                         ret = mlx5_flow_validate_item_mpls(dev, items,
1917                                                            item_flags,
1918                                                            last_item, error);
1919                         if (ret < 0)
1920                                 return ret;
1921                         last_item = MLX5_FLOW_LAYER_MPLS;
1922                         break;
1923                 case RTE_FLOW_ITEM_TYPE_META:
1924                         ret = flow_dv_validate_item_meta(dev, items, attr,
1925                                                          error);
1926                         if (ret < 0)
1927                                 return ret;
1928                         last_item = MLX5_FLOW_ITEM_METADATA;
1929                         break;
1930                 default:
1931                         return rte_flow_error_set(error, ENOTSUP,
1932                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1933                                                   NULL, "item not supported");
1934                 }
1935                 item_flags |= last_item;
1936         }
1937         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1938                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
1939                         return rte_flow_error_set(error, ENOTSUP,
1940                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1941                                                   actions, "too many actions");
1942                 switch (actions->type) {
1943                 case RTE_FLOW_ACTION_TYPE_VOID:
1944                         break;
1945                 case RTE_FLOW_ACTION_TYPE_FLAG:
1946                         ret = mlx5_flow_validate_action_flag(action_flags,
1947                                                              attr, error);
1948                         if (ret < 0)
1949                                 return ret;
1950                         action_flags |= MLX5_FLOW_ACTION_FLAG;
1951                         ++actions_n;
1952                         break;
1953                 case RTE_FLOW_ACTION_TYPE_MARK:
1954                         ret = mlx5_flow_validate_action_mark(actions,
1955                                                              action_flags,
1956                                                              attr, error);
1957                         if (ret < 0)
1958                                 return ret;
1959                         action_flags |= MLX5_FLOW_ACTION_MARK;
1960                         ++actions_n;
1961                         break;
1962                 case RTE_FLOW_ACTION_TYPE_DROP:
1963                         ret = mlx5_flow_validate_action_drop(action_flags,
1964                                                              attr, error);
1965                         if (ret < 0)
1966                                 return ret;
1967                         action_flags |= MLX5_FLOW_ACTION_DROP;
1968                         ++actions_n;
1969                         break;
1970                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1971                         ret = mlx5_flow_validate_action_queue(actions,
1972                                                               action_flags, dev,
1973                                                               attr, error);
1974                         if (ret < 0)
1975                                 return ret;
1976                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
1977                         ++actions_n;
1978                         break;
1979                 case RTE_FLOW_ACTION_TYPE_RSS:
1980                         ret = mlx5_flow_validate_action_rss(actions,
1981                                                             action_flags, dev,
1982                                                             attr, error);
1983                         if (ret < 0)
1984                                 return ret;
1985                         action_flags |= MLX5_FLOW_ACTION_RSS;
1986                         ++actions_n;
1987                         break;
1988                 case RTE_FLOW_ACTION_TYPE_COUNT:
1989                         ret = flow_dv_validate_action_count(dev, error);
1990                         if (ret < 0)
1991                                 return ret;
1992                         action_flags |= MLX5_FLOW_ACTION_COUNT;
1993                         ++actions_n;
1994                         break;
1995                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1996                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
1997                         ret = flow_dv_validate_action_l2_encap(action_flags,
1998                                                                actions, attr,
1999                                                                error);
2000                         if (ret < 0)
2001                                 return ret;
2002                         action_flags |= actions->type ==
2003                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2004                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
2005                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
2006                         ++actions_n;
2007                         break;
2008                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2009                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2010                         ret = flow_dv_validate_action_l2_decap(action_flags,
2011                                                                attr, error);
2012                         if (ret < 0)
2013                                 return ret;
2014                         action_flags |= actions->type ==
2015                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2016                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
2017                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
2018                         ++actions_n;
2019                         break;
2020                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2021                         ret = flow_dv_validate_action_raw_encap(action_flags,
2022                                                                 actions, attr,
2023                                                                 error);
2024                         if (ret < 0)
2025                                 return ret;
2026                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2027                         ++actions_n;
2028                         break;
2029                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2030                         ret = flow_dv_validate_action_raw_decap(action_flags,
2031                                                                 actions, attr,
2032                                                                 error);
2033                         if (ret < 0)
2034                                 return ret;
2035                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2036                         ++actions_n;
2037                         break;
2038                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2039                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2040                         ret = flow_dv_validate_action_modify_mac(action_flags,
2041                                                                  actions,
2042                                                                  item_flags,
2043                                                                  error);
2044                         if (ret < 0)
2045                                 return ret;
2046                         /* Count all modify-header actions as one action. */
2047                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2048                                 ++actions_n;
2049                         action_flags |= actions->type ==
2050                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2051                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
2052                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
2053                         break;
2054
2055                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2056                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2057                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
2058                                                                   actions,
2059                                                                   item_flags,
2060                                                                   error);
2061                         if (ret < 0)
2062                                 return ret;
2063                         /* Count all modify-header actions as one action. */
2064                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2065                                 ++actions_n;
2066                         action_flags |= actions->type ==
2067                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2068                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2069                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
2070                         break;
2071                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2072                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2073                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
2074                                                                   actions,
2075                                                                   item_flags,
2076                                                                   error);
2077                         if (ret < 0)
2078                                 return ret;
2079                         /* Count all modify-header actions as one action. */
2080                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2081                                 ++actions_n;
2082                         action_flags |= actions->type ==
2083                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2084                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2085                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
2086                         break;
2087                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2088                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2089                         ret = flow_dv_validate_action_modify_tp(action_flags,
2090                                                                 actions,
2091                                                                 item_flags,
2092                                                                 error);
2093                         if (ret < 0)
2094                                 return ret;
2095                         /* Count all modify-header actions as one action. */
2096                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2097                                 ++actions_n;
2098                         action_flags |= actions->type ==
2099                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2100                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
2101                                                 MLX5_FLOW_ACTION_SET_TP_DST;
2102                         break;
2103                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2104                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2105                         ret = flow_dv_validate_action_modify_ttl(action_flags,
2106                                                                  actions,
2107                                                                  item_flags,
2108                                                                  error);
2109                         if (ret < 0)
2110                                 return ret;
2111                         /* Count all modify-header actions as one action. */
2112                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2113                                 ++actions_n;
2114                         action_flags |= actions->type ==
2115                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
2116                                                 MLX5_FLOW_ACTION_SET_TTL :
2117                                                 MLX5_FLOW_ACTION_DEC_TTL;
2118                         break;
2119                 case RTE_FLOW_ACTION_TYPE_JUMP:
2120                         ret = flow_dv_validate_action_jump(actions,
2121                                                            attr->group, error);
2122                         if (ret)
2123                                 return ret;
2124                         ++actions_n;
2125                         action_flags |= MLX5_FLOW_ACTION_JUMP;
2126                         break;
2127                 default:
2128                         return rte_flow_error_set(error, ENOTSUP,
2129                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2130                                                   actions,
2131                                                   "action not supported");
2132                 }
2133         }
2134         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2135                 return rte_flow_error_set(error, EINVAL,
2136                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
2137                                           "no fate action is found");
2138         return 0;
2139 }
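
/*
 * Illustrative sketch (not part of the driver): a pattern/action pair that
 * exercises the validation loop above -- outer ETH/IPV4/UDP items plus a
 * QUEUE fate action. Port and queue numbers are placeholders; the call
 * reaches flow_dv_validate() when the PMD selects the DV flow engine.
 */
static __rte_unused int
example_validate_flow(uint16_t port_id)
{
	struct rte_flow_error err;
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}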
2140
2141 /**
2142  * Internal preparation function. Allocates the DV flow size;
2143  * this size is constant.
2144  *
2145  * @param[in] attr
2146  *   Pointer to the flow attributes.
2147  * @param[in] items
2148  *   Pointer to the list of items.
2149  * @param[in] actions
2150  *   Pointer to the list of actions.
2151  * @param[out] error
2152  *   Pointer to the error structure.
2153  *
2154  * @return
2155  *   Pointer to mlx5_flow object on success,
2156  *   otherwise NULL and rte_errno is set.
2157  */
2158 static struct mlx5_flow *
2159 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2160                 const struct rte_flow_item items[] __rte_unused,
2161                 const struct rte_flow_action actions[] __rte_unused,
2162                 struct rte_flow_error *error)
2163 {
2164         uint32_t size = sizeof(struct mlx5_flow);
2165         struct mlx5_flow *flow;
2166
2167         flow = rte_calloc(__func__, 1, size, 0);
2168         if (!flow) {
2169                 rte_flow_error_set(error, ENOMEM,
2170                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2171                                    "not enough memory to create flow");
2172                 return NULL;
2173         }
2174         flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2175         return flow;
2176 }
2177
2178 #ifndef NDEBUG
2179 /**
2180  * Sanity check for match mask and value. Similar to check_valid_spec() in
2181  * kernel driver. If unmasked bit is present in value, it returns failure.
2182  * kernel driver. A value bit set outside its mask makes the check fail.
2183  * @param match_mask
2184  *   pointer to match mask buffer.
2185  * @param match_value
2186  *   pointer to match value buffer.
2187  *
2188  * @return
2189  *   0 if valid, -EINVAL otherwise.
2190  */
2191 static int
2192 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2193 {
2194         uint8_t *m = match_mask;
2195         uint8_t *v = match_value;
2196         unsigned int i;
2197
2198         for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2199                 if (v[i] & ~m[i]) {
2200                         DRV_LOG(ERR,
2201                                 "match_value differs from match_criteria"
2202                                 " %p[%u] != %p[%u]",
2203                                 match_value, i, match_mask, i);
2204                         return -EINVAL;
2205                 }
2206         }
2207         return 0;
2208 }
2209 #endif
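
/*
 * Example of what the debug check above rejects: with a mask byte of 0xf0,
 * a value byte of 0x0f has bits set outside the mask (0x0f & ~0xf0 ==
 * 0x0f), so the entry could never match and -EINVAL is returned.
 */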
2210
2211 /**
2212  * Add Ethernet item to matcher and to the value.
2213  *
2214  * @param[in, out] matcher
2215  *   Flow matcher.
2216  * @param[in, out] key
2217  *   Flow matcher value.
2218  * @param[in] item
2219  *   Flow pattern to translate.
2220  * @param[in] inner
2221  *   Item is inner pattern.
2222  */
2223 static void
2224 flow_dv_translate_item_eth(void *matcher, void *key,
2225                            const struct rte_flow_item *item, int inner)
2226 {
2227         const struct rte_flow_item_eth *eth_m = item->mask;
2228         const struct rte_flow_item_eth *eth_v = item->spec;
2229         const struct rte_flow_item_eth nic_mask = {
2230                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2231                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2232                 .type = RTE_BE16(0xffff),
2233         };
2234         void *headers_m;
2235         void *headers_v;
2236         char *l24_v;
2237         unsigned int i;
2238
2239         if (!eth_v)
2240                 return;
2241         if (!eth_m)
2242                 eth_m = &nic_mask;
2243         if (inner) {
2244                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2245                                          inner_headers);
2246                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2247         } else {
2248                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2249                                          outer_headers);
2250                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2251         }
2252         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2253                &eth_m->dst, sizeof(eth_m->dst));
2254         /* The value must be in the range of the mask. */
2255         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2256         for (i = 0; i < sizeof(eth_m->dst); ++i)
2257                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2258         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2259                &eth_m->src, sizeof(eth_m->src));
2260         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2261         /* The value must be in the range of the mask. */
2262         for (i = 0; i < sizeof(eth_m->src); ++i)
2263                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2264         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2265                  rte_be_to_cpu_16(eth_m->type));
2266         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2267         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2268 }
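
/*
 * Illustrative sketch (not part of the driver): an ETH item matching one
 * destination MAC while ignoring source and ethertype. The translator
 * above copies the mask verbatim and ANDs spec with mask for the value.
 * The MAC address is a placeholder.
 */
static const struct rte_flow_item_eth example_eth_spec __rte_unused = {
	.dst.addr_bytes = "\x00\x0c\x29\x01\x02\x03",
};
static const struct rte_flow_item_eth example_eth_mask __rte_unused = {
	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
static const struct rte_flow_item example_eth_item __rte_unused = {
	.type = RTE_FLOW_ITEM_TYPE_ETH,
	.spec = &example_eth_spec,
	.mask = &example_eth_mask,
};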
2269
2270 /**
2271  * Add VLAN item to matcher and to the value.
2272  *
2273  * @param[in, out] matcher
2274  *   Flow matcher.
2275  * @param[in, out] key
2276  *   Flow matcher value.
2277  * @param[in] item
2278  *   Flow pattern to translate.
2279  * @param[in] inner
2280  *   Item is inner pattern.
2281  */
2282 static void
2283 flow_dv_translate_item_vlan(void *matcher, void *key,
2284                             const struct rte_flow_item *item,
2285                             int inner)
2286 {
2287         const struct rte_flow_item_vlan *vlan_m = item->mask;
2288         const struct rte_flow_item_vlan *vlan_v = item->spec;
2289         const struct rte_flow_item_vlan nic_mask = {
2290                 .tci = RTE_BE16(0x0fff),
2291                 .inner_type = RTE_BE16(0xffff),
2292         };
2293         void *headers_m;
2294         void *headers_v;
2295         uint16_t tci_m;
2296         uint16_t tci_v;
2297
2298         if (!vlan_v)
2299                 return;
2300         if (!vlan_m)
2301                 vlan_m = &nic_mask;
2302         if (inner) {
2303                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2304                                          inner_headers);
2305                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2306         } else {
2307                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2308                                          outer_headers);
2309                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2310         }
2311         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2312         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2313         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2314         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2315         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2316         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2317         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2318         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2319         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2320         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2321 }
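
/*
 * Illustrative sketch (not part of the driver): the TCI split performed
 * above. TCI is PCP(3 bits) | CFI(1 bit) | VID(12 bits), so for
 * tci = 0x2064: first_vid = 0x064 (VLAN 100), first_cfi =
 * (0x2064 >> 12) & 1 = 0 and first_prio = 0x2064 >> 13 = 1.
 */
static const struct rte_flow_item_vlan example_vlan_spec __rte_unused = {
	.tci = RTE_BE16(0x2064), /* priority 1, CFI 0, VLAN id 100 */
};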
2322
2323 /**
2324  * Add IPV4 item to matcher and to the value.
2325  *
2326  * @param[in, out] matcher
2327  *   Flow matcher.
2328  * @param[in, out] key
2329  *   Flow matcher value.
2330  * @param[in] item
2331  *   Flow pattern to translate.
2332  * @param[in] inner
2333  *   Item is inner pattern.
2334  * @param[in] group
2335  *   The group to insert the rule.
2336  */
2337 static void
2338 flow_dv_translate_item_ipv4(void *matcher, void *key,
2339                             const struct rte_flow_item *item,
2340                             int inner, uint32_t group)
2341 {
2342         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2343         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2344         const struct rte_flow_item_ipv4 nic_mask = {
2345                 .hdr = {
2346                         .src_addr = RTE_BE32(0xffffffff),
2347                         .dst_addr = RTE_BE32(0xffffffff),
2348                         .type_of_service = 0xff,
2349                         .next_proto_id = 0xff,
2350                 },
2351         };
2352         void *headers_m;
2353         void *headers_v;
2354         char *l24_m;
2355         char *l24_v;
2356         uint8_t tos;
2357
2358         if (inner) {
2359                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2360                                          inner_headers);
2361                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2362         } else {
2363                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2364                                          outer_headers);
2365                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2366         }
2367         if (group == 0)
2368                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2369         else
2370                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2371         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2372         if (!ipv4_v)
2373                 return;
2374         if (!ipv4_m)
2375                 ipv4_m = &nic_mask;
2376         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2377                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2378         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2379                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2380         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2381         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2382         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2383                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
2384         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2385                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
2386         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2387         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2388         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2389         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2390                  ipv4_m->hdr.type_of_service);
2391         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2392         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2393                  ipv4_m->hdr.type_of_service >> 2);
2394         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2395         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2396                  ipv4_m->hdr.next_proto_id);
2397         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2398                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2399 }
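
/*
 * Illustrative sketch (not part of the driver): an IPV4 item matching the
 * 192.168.0.0/16 destination prefix. The partial mask keeps only the top
 * 16 address bits; the translator ANDs spec with mask before programming
 * the value and widens the ip_version mask to 0xf when group == 0.
 */
static const struct rte_flow_item_ipv4 example_ipv4_spec __rte_unused = {
	.hdr = { .dst_addr = RTE_BE32(0xc0a80000) }, /* 192.168.0.0 */
};
static const struct rte_flow_item_ipv4 example_ipv4_mask __rte_unused = {
	.hdr = { .dst_addr = RTE_BE32(0xffff0000) }, /* /16 */
};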
2400
2401 /**
2402  * Add IPV6 item to matcher and to the value.
2403  *
2404  * @param[in, out] matcher
2405  *   Flow matcher.
2406  * @param[in, out] key
2407  *   Flow matcher value.
2408  * @param[in] item
2409  *   Flow pattern to translate.
2410  * @param[in] inner
2411  *   Item is inner pattern.
2412  * @param[in] group
2413  *   The group to insert the rule.
2414  */
2415 static void
2416 flow_dv_translate_item_ipv6(void *matcher, void *key,
2417                             const struct rte_flow_item *item,
2418                             int inner, uint32_t group)
2419 {
2420         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2421         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2422         const struct rte_flow_item_ipv6 nic_mask = {
2423                 .hdr = {
2424                         .src_addr =
2425                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2426                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2427                         .dst_addr =
2428                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2429                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2430                         .vtc_flow = RTE_BE32(0xffffffff),
2431                         .proto = 0xff,
2432                         .hop_limits = 0xff,
2433                 },
2434         };
2435         void *headers_m;
2436         void *headers_v;
2437         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2438         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2439         char *l24_m;
2440         char *l24_v;
2441         uint32_t vtc_m;
2442         uint32_t vtc_v;
2443         int i;
2444         int size;
2445
2446         if (inner) {
2447                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2448                                          inner_headers);
2449                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2450         } else {
2451                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2452                                          outer_headers);
2453                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2454         }
2455         if (group == 0)
2456                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2457         else
2458                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2459         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2460         if (!ipv6_v)
2461                 return;
2462         if (!ipv6_m)
2463                 ipv6_m = &nic_mask;
2464         size = sizeof(ipv6_m->hdr.dst_addr);
2465         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2466                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2467         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2468                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2469         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2470         for (i = 0; i < size; ++i)
2471                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2472         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2473                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
2474         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2475                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
2476         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2477         for (i = 0; i < size; ++i)
2478                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
2479         /* TOS: vtc_flow = version(4)|TC(8)|label(20); ECN = TC[1:0], DSCP = TC[7:2]. */
2480         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2481         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2482         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2483         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2484         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2485         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2486         /* Label. */
2487         if (inner) {
2488                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2489                          vtc_m);
2490                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2491                          vtc_v);
2492         } else {
2493                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2494                          vtc_m);
2495                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2496                          vtc_v);
2497         }
2498         /* Protocol. */
2499         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2500                  ipv6_m->hdr.proto);
2501         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2502                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2503 }
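
/*
 * Illustrative sketch, not part of the driver: the IPv6 vtc_flow word
 * packs version (bits 31:28), traffic class (bits 27:20, i.e. DSCP in
 * bits 27:22 and ECN in bits 21:20) and flow label (bits 19:0). The
 * hypothetical helpers below mirror the shifts used above; the driver
 * itself can omit the masking because MLX5_SET() truncates the value
 * to the destination field width.
 */
static inline uint32_t
flow_dv_example_ipv6_dscp(uint32_t vtc)
{
        return (vtc >> 22) & 0x3f; /* 6-bit DSCP field. */
}

static inline uint32_t
flow_dv_example_ipv6_ecn(uint32_t vtc)
{
        return (vtc >> 20) & 0x3; /* 2-bit ECN field. */
}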
2504
2505 /**
2506  * Add TCP item to matcher and to the value.
2507  *
2508  * @param[in, out] matcher
2509  *   Flow matcher.
2510  * @param[in, out] key
2511  *   Flow matcher value.
2512  * @param[in] item
2513  *   Flow pattern to translate.
2514  * @param[in] inner
2515  *   Item is inner pattern.
2516  */
2517 static void
2518 flow_dv_translate_item_tcp(void *matcher, void *key,
2519                            const struct rte_flow_item *item,
2520                            int inner)
2521 {
2522         const struct rte_flow_item_tcp *tcp_m = item->mask;
2523         const struct rte_flow_item_tcp *tcp_v = item->spec;
2524         void *headers_m;
2525         void *headers_v;
2526
2527         if (inner) {
2528                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2529                                          inner_headers);
2530                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2531         } else {
2532                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2533                                          outer_headers);
2534                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2535         }
2536         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2537         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2538         if (!tcp_v)
2539                 return;
2540         if (!tcp_m)
2541                 tcp_m = &rte_flow_item_tcp_mask;
2542         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2543                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
2544         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2545                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2546         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2547                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2548         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2549                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2550 }
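
/*
 * Usage sketch (hypothetical application code): every item translator
 * writes the mask verbatim and the value as spec & mask, so only bits
 * enabled in the mask can influence matching. E.g. a TCP item matching
 * destination port 80 regardless of source port:
 *
 *      struct rte_flow_item_tcp spec = {
 *              .hdr = { .dst_port = RTE_BE16(80) },
 *      };
 *      struct rte_flow_item_tcp mask = {
 *              .hdr = { .dst_port = RTE_BE16(0xffff) },
 *      };
 *      struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_TCP,
 *              .spec = &spec,
 *              .mask = &mask,
 *      };
 *
 * The same convention applies to the UDP and GRE translators below; a
 * NULL mask selects the default rte_flow_item_*_mask.
 */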
2551
2552 /**
2553  * Add UDP item to matcher and to the value.
2554  *
2555  * @param[in, out] matcher
2556  *   Flow matcher.
2557  * @param[in, out] key
2558  *   Flow matcher value.
2559  * @param[in] item
2560  *   Flow pattern to translate.
2561  * @param[in] inner
2562  *   Item is inner pattern.
2563  */
2564 static void
2565 flow_dv_translate_item_udp(void *matcher, void *key,
2566                            const struct rte_flow_item *item,
2567                            int inner)
2568 {
2569         const struct rte_flow_item_udp *udp_m = item->mask;
2570         const struct rte_flow_item_udp *udp_v = item->spec;
2571         void *headers_m;
2572         void *headers_v;
2573
2574         if (inner) {
2575                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2576                                          inner_headers);
2577                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2578         } else {
2579                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2580                                          outer_headers);
2581                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2582         }
2583         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2584         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2585         if (!udp_v)
2586                 return;
2587         if (!udp_m)
2588                 udp_m = &rte_flow_item_udp_mask;
2589         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2590                  rte_be_to_cpu_16(udp_m->hdr.src_port));
2591         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2592                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2593         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2594                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
2595         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2596                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2597 }
2598
2599 /**
2600  * Add GRE item to matcher and to the value.
2601  *
2602  * @param[in, out] matcher
2603  *   Flow matcher.
2604  * @param[in, out] key
2605  *   Flow matcher value.
2606  * @param[in] item
2607  *   Flow pattern to translate.
2608  * @param[in] inner
2609  *   Item is inner pattern.
2610  */
2611 static void
2612 flow_dv_translate_item_gre(void *matcher, void *key,
2613                            const struct rte_flow_item *item,
2614                            int inner)
2615 {
2616         const struct rte_flow_item_gre *gre_m = item->mask;
2617         const struct rte_flow_item_gre *gre_v = item->spec;
2618         void *headers_m;
2619         void *headers_v;
2620         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2621         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2622
2623         if (inner) {
2624                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2625                                          inner_headers);
2626                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2627         } else {
2628                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2629                                          outer_headers);
2630                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2631         }
2632         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2633         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2634         if (!gre_v)
2635                 return;
2636         if (!gre_m)
2637                 gre_m = &rte_flow_item_gre_mask;
2638         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2639                  rte_be_to_cpu_16(gre_m->protocol));
2640         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2641                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2642 }
2643
2644 /**
2645  * Add NVGRE item to matcher and to the value.
2646  *
2647  * @param[in, out] matcher
2648  *   Flow matcher.
2649  * @param[in, out] key
2650  *   Flow matcher value.
2651  * @param[in] item
2652  *   Flow pattern to translate.
2653  * @param[in] inner
2654  *   Item is inner pattern.
2655  */
2656 static void
2657 flow_dv_translate_item_nvgre(void *matcher, void *key,
2658                              const struct rte_flow_item *item,
2659                              int inner)
2660 {
2661         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2662         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2663         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2664         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        const char *tni_flow_id_m;
        const char *tni_flow_id_v;
2667         char *gre_key_m;
2668         char *gre_key_v;
2669         int size;
2670         int i;
2671
2672         flow_dv_translate_item_gre(matcher, key, item, inner);
2673         if (!nvgre_v)
2674                 return;
2675         if (!nvgre_m)
2676                 nvgre_m = &rte_flow_item_nvgre_mask;
        tni_flow_id_m = (const char *)nvgre_m->tni;
        tni_flow_id_v = (const char *)nvgre_v->tni;
        size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2678         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2679         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2680         memcpy(gre_key_m, tni_flow_id_m, size);
2681         for (i = 0; i < size; ++i)
2682                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2683 }
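
/*
 * Layout note: in struct rte_flow_item_nvgre the 24-bit TNI and the
 * 8-bit flow_id are adjacent, so the single memcpy() above copies them
 * together as the full 32-bit GRE key; the per-byte loop then applies
 * the mask so partial TNI matching still works.
 */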
2684
2685 /**
2686  * Add VXLAN item to matcher and to the value.
2687  *
2688  * @param[in, out] matcher
2689  *   Flow matcher.
2690  * @param[in, out] key
2691  *   Flow matcher value.
2692  * @param[in] item
2693  *   Flow pattern to translate.
2694  * @param[in] inner
2695  *   Item is inner pattern.
2696  */
2697 static void
2698 flow_dv_translate_item_vxlan(void *matcher, void *key,
2699                              const struct rte_flow_item *item,
2700                              int inner)
2701 {
2702         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2703         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2704         void *headers_m;
2705         void *headers_v;
2706         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2707         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2708         char *vni_m;
2709         char *vni_v;
2710         uint16_t dport;
2711         int size;
2712         int i;
2713
2714         if (inner) {
2715                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2716                                          inner_headers);
2717                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2718         } else {
2719                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2720                                          outer_headers);
2721                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2722         }
2723         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2724                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
2725         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2726                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2727                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
2728         }
2729         if (!vxlan_v)
2730                 return;
2731         if (!vxlan_m)
2732                 vxlan_m = &rte_flow_item_vxlan_mask;
2733         size = sizeof(vxlan_m->vni);
2734         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2735         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2736         memcpy(vni_m, vxlan_m->vni, size);
2737         for (i = 0; i < size; ++i)
2738                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
2739 }
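
/*
 * Usage sketch (hypothetical application code): matching VXLAN VNI 42.
 * The VNI is a 24-bit value stored as three network-order bytes, and
 * the translator above fills in the UDP destination port (4789 for
 * VXLAN, 4790 for VXLAN-GPE) only when the preceding UDP item left it
 * unmatched.
 *
 *      struct rte_flow_item_vxlan spec = { .vni = { 0, 0, 42 } };
 *      struct rte_flow_item_vxlan mask = { .vni = { 0xff, 0xff, 0xff } };
 *      struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *              .spec = &spec,
 *              .mask = &mask,
 *      };
 */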
2740
2741 /**
2742  * Add MPLS item to matcher and to the value.
2743  *
2744  * @param[in, out] matcher
2745  *   Flow matcher.
2746  * @param[in, out] key
2747  *   Flow matcher value.
2748  * @param[in] item
2749  *   Flow pattern to translate.
2750  * @param[in] prev_layer
2751  *   The protocol layer indicated in previous item.
2752  * @param[in] inner
2753  *   Item is inner pattern.
2754  */
2755 static void
2756 flow_dv_translate_item_mpls(void *matcher, void *key,
2757                             const struct rte_flow_item *item,
2758                             uint64_t prev_layer,
2759                             int inner)
2760 {
2761         const uint32_t *in_mpls_m = item->mask;
2762         const uint32_t *in_mpls_v = item->spec;
        uint32_t *out_mpls_m = NULL;
        uint32_t *out_mpls_v = NULL;
2765         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2766         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2767         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2768                                      misc_parameters_2);
2769         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2770         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2771         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2772
2773         switch (prev_layer) {
2774         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2775                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2776                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2777                          MLX5_UDP_PORT_MPLS);
2778                 break;
2779         case MLX5_FLOW_LAYER_GRE:
2780                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
2781                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2782                          ETHER_TYPE_MPLS);
2783                 break;
2784         default:
2785                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2786                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2787                          IPPROTO_MPLS);
2788                 break;
2789         }
2790         if (!in_mpls_v)
2791                 return;
2792         if (!in_mpls_m)
2793                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
2794         switch (prev_layer) {
2795         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2796                 out_mpls_m =
2797                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2798                                                  outer_first_mpls_over_udp);
2799                 out_mpls_v =
2800                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2801                                                  outer_first_mpls_over_udp);
2802                 break;
2803         case MLX5_FLOW_LAYER_GRE:
2804                 out_mpls_m =
2805                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2806                                                  outer_first_mpls_over_gre);
2807                 out_mpls_v =
2808                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2809                                                  outer_first_mpls_over_gre);
2810                 break;
2811         default:
2812                 /* Inner MPLS not over GRE is not supported. */
2813                 if (!inner) {
2814                         out_mpls_m =
2815                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2816                                                          misc2_m,
2817                                                          outer_first_mpls);
2818                         out_mpls_v =
2819                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2820                                                          misc2_v,
2821                                                          outer_first_mpls);
2822                 }
2823                 break;
2824         }
2825         if (out_mpls_m && out_mpls_v) {
2826                 *out_mpls_m = *in_mpls_m;
2827                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
2828         }
2829 }
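
/*
 * Dispatch note: prev_layer selects both the enabling match (UDP
 * destination port for MPLS-over-UDP, GRE protocol ETHER_TYPE_MPLS for
 * MPLS-over-GRE, IP protocol otherwise) and the misc2 field receiving
 * the label, so "udp / mpls" and "gre / mpls" patterns land in
 * different match fields even though the MPLS item itself is the same.
 */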
2830
2831 /**
 * Add META item to matcher and to the value.
2833  *
2834  * @param[in, out] matcher
2835  *   Flow matcher.
2836  * @param[in, out] key
2837  *   Flow matcher value.
2838  * @param[in] item
2839  *   Flow pattern to translate.
2842  */
2843 static void
2844 flow_dv_translate_item_meta(void *matcher, void *key,
2845                             const struct rte_flow_item *item)
2846 {
2847         const struct rte_flow_item_meta *meta_m;
2848         const struct rte_flow_item_meta *meta_v;
2849         void *misc2_m =
2850                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2851         void *misc2_v =
2852                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2853
2854         meta_m = (const void *)item->mask;
2855         if (!meta_m)
2856                 meta_m = &rte_flow_item_meta_mask;
2857         meta_v = (const void *)item->spec;
2858         if (meta_v) {
2859                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2860                          rte_be_to_cpu_32(meta_m->data));
2861                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2862                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
2863         }
2864 }
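
/*
 * Usage sketch (hypothetical application code): matching the 32-bit
 * metadata value carried with the packet; the translator above places
 * it in metadata register A. A NULL mask selects
 * rte_flow_item_meta_mask, i.e. all bits are significant.
 *
 *      struct rte_flow_item_meta meta = { .data = RTE_BE32(0xcafe) };
 *      struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_META,
 *              .spec = &meta,
 *              .mask = NULL,
 *      };
 */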
2865
2866 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
2867
2868 #define HEADER_IS_ZERO(match_criteria, headers)                              \
2869         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
                 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
2871
2872 /**
2873  * Calculate flow matcher enable bitmap.
2874  *
2875  * @param match_criteria
2876  *   Pointer to flow matcher criteria.
2877  *
2878  * @return
2879  *   Bitmap of enabled fields.
2880  */
2881 static uint8_t
2882 flow_dv_matcher_enable(uint32_t *match_criteria)
2883 {
2884         uint8_t match_criteria_enable;
2885
2886         match_criteria_enable =
2887                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2888                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2889         match_criteria_enable |=
2890                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2891                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2892         match_criteria_enable |=
2893                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2894                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2895         match_criteria_enable |=
2896                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2897                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2898 #ifdef HAVE_MLX5DV_DR
2899         match_criteria_enable |=
2900                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
2901                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
2902 #endif
2903         return match_criteria_enable;
2904 }
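
/*
 * Worked example: a matcher built from an "eth / ipv4 / udp / vxlan"
 * pattern touches outer_headers (L2-L4 fields) and misc_parameters
 * (vxlan_vni), so flow_dv_matcher_enable() returns
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT).
 */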
2905
2907 /**
2908  * Get a flow table.
2909  *
 * @param[in, out] dev
2911  *   Pointer to rte_eth_dev structure.
2912  * @param[in] table_id
2913  *   Table id to use.
2914  * @param[in] egress
2915  *   Direction of the table.
2916  * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Returns the table resource for the given id, NULL in case of failure.
2921  */
2922 static struct mlx5_flow_tbl_resource *
2923 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
2924                          uint32_t table_id, uint8_t egress,
2925                          struct rte_flow_error *error)
2926 {
2927         struct mlx5_priv *priv = dev->data->dev_private;
2928         struct mlx5_ibv_shared *sh = priv->sh;
2929         struct mlx5_flow_tbl_resource *tbl;
2930
2931 #ifdef HAVE_MLX5DV_DR
2932         if (egress) {
2933                 tbl = &sh->tx_tbl[table_id];
2934                 if (!tbl->obj)
2935                         tbl->obj = mlx5_glue->dr_create_flow_tbl
2936                                 (sh->tx_ns, table_id);
2937         } else {
2938                 tbl = &sh->rx_tbl[table_id];
2939                 if (!tbl->obj)
2940                         tbl->obj = mlx5_glue->dr_create_flow_tbl
2941                                 (sh->rx_ns, table_id);
2942         }
2943         if (!tbl->obj) {
2944                 rte_flow_error_set(error, ENOMEM,
2945                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2946                                    NULL, "cannot create table");
2947                 return NULL;
2948         }
2949         rte_atomic32_inc(&tbl->refcnt);
2950         return tbl;
2951 #else
2952         (void)error;
2953         (void)tbl;
2954         if (egress)
2955                 return &sh->tx_tbl[table_id];
2956         else
2957                 return &sh->rx_tbl[table_id];
2958 #endif
2959 }
2960
2961 /**
2962  * Release a flow table.
2963  *
2964  * @param[in] tbl
2965  *   Table resource to be released.
2966  *
2967  * @return
 *   Returns 0 if the table was released, 1 otherwise.
2969  */
2970 static int
2971 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
2972 {
2973         if (!tbl)
2974                 return 0;
2975         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
2976                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
2977                 tbl->obj = NULL;
2978                 return 0;
2979         }
2980         return 1;
2981 }
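
/*
 * Usage sketch (hypothetical caller, DR case): table resources are
 * reference counted, so each successful flow_dv_tbl_resource_get()
 * must be paired with one flow_dv_tbl_resource_release(); the
 * underlying table object is destroyed only when the last reference
 * is dropped.
 *
 *      struct mlx5_flow_tbl_resource *tbl =
 *              flow_dv_tbl_resource_get(dev, group * MLX5_GROUP_FACTOR,
 *                                       0, error);
 *      if (tbl) {
 *              (... use tbl->obj ...)
 *              flow_dv_tbl_resource_release(tbl);
 *      }
 */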
2982
2983 /**
2984  * Register the flow matcher.
2985  *
 * @param[in, out] dev
2987  *   Pointer to rte_eth_dev structure.
2988  * @param[in, out] matcher
2989  *   Pointer to flow matcher.
 * @param[in, out] dev_flow
2991  *   Pointer to the dev_flow.
2992  * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
2997  */
2998 static int
2999 flow_dv_matcher_register(struct rte_eth_dev *dev,
3000                          struct mlx5_flow_dv_matcher *matcher,
3001                          struct mlx5_flow *dev_flow,
3002                          struct rte_flow_error *error)
3003 {
3004         struct mlx5_priv *priv = dev->data->dev_private;
3005         struct mlx5_ibv_shared *sh = priv->sh;
3006         struct mlx5_flow_dv_matcher *cache_matcher;
3007         struct mlx5dv_flow_matcher_attr dv_attr = {
3008                 .type = IBV_FLOW_ATTR_NORMAL,
3009                 .match_mask = (void *)&matcher->mask,
3010         };
3011         struct mlx5_flow_tbl_resource *tbl = NULL;
3012
3013         /* Lookup from cache. */
3014         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3015                 if (matcher->crc == cache_matcher->crc &&
3016                     matcher->priority == cache_matcher->priority &&
3017                     matcher->egress == cache_matcher->egress &&
3018                     matcher->group == cache_matcher->group &&
3019                     !memcmp((const void *)matcher->mask.buf,
3020                             (const void *)cache_matcher->mask.buf,
3021                             cache_matcher->mask.size)) {
3022                         DRV_LOG(DEBUG,
3023                                 "priority %hd use %s matcher %p: refcnt %d++",
3024                                 cache_matcher->priority,
3025                                 cache_matcher->egress ? "tx" : "rx",
3026                                 (void *)cache_matcher,
3027                                 rte_atomic32_read(&cache_matcher->refcnt));
3028                         rte_atomic32_inc(&cache_matcher->refcnt);
3029                         dev_flow->dv.matcher = cache_matcher;
3030                         return 0;
3031                 }
3032         }
3033         /* Register new matcher. */
3034         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3035         if (!cache_matcher)
3036                 return rte_flow_error_set(error, ENOMEM,
3037                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3038                                           "cannot allocate matcher memory");
3039         tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3040                                        matcher->egress, error);
3041         if (!tbl) {
3042                 rte_free(cache_matcher);
3043                 return rte_flow_error_set(error, ENOMEM,
3044                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3045                                           NULL, "cannot create table");
3046         }
3047         *cache_matcher = *matcher;
3048         dv_attr.match_criteria_enable =
3049                 flow_dv_matcher_enable(cache_matcher->mask.buf);
3050         dv_attr.priority = matcher->priority;
3051         if (matcher->egress)
3052                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3053         cache_matcher->matcher_object =
3054                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3055         if (!cache_matcher->matcher_object) {
3056                 rte_free(cache_matcher);
3057 #ifdef HAVE_MLX5DV_DR
3058                 flow_dv_tbl_resource_release(tbl);
3059 #endif
3060                 return rte_flow_error_set(error, ENOMEM,
3061                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3062                                           NULL, "cannot create matcher");
3063         }
3064         rte_atomic32_inc(&cache_matcher->refcnt);
3065         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3066         dev_flow->dv.matcher = cache_matcher;
3067         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3068                 cache_matcher->priority,
3069                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3070                 rte_atomic32_read(&cache_matcher->refcnt));
3071         rte_atomic32_inc(&tbl->refcnt);
3072         return 0;
3073 }
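
/*
 * Cache-key note: matchers are deduplicated on the (crc, priority,
 * egress, group, mask) tuple. The crc, computed by the caller with
 * rte_raw_cksum() over mask.buf, is only a cheap first-pass filter;
 * the memcmp() of the masks still decides equality, so a crc collision
 * merely costs one extra comparison.
 */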
3074
3075 /**
3076  * Add source vport match to the specified matcher.
3077  *
3078  * @param[in, out] matcher
3079  *   Flow matcher.
3080  * @param[in, out] key
3081  *   Flow matcher value.
3082  * @param[in] port
 *   Source vport value to match.
 * @param[in] mask
 *   Mask to apply.
3086  */
3087 static void
flow_dv_translate_source_vport(void *matcher, void *key,
                               int16_t port, uint16_t mask)
3090 {
3091         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3092         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3093
3094         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3095         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3096 }
3097
3098 /**
3099  * Find existing tag resource or create and register a new one.
3100  *
 * @param[in, out] dev
3102  *   Pointer to rte_eth_dev structure.
3103  * @param[in, out] resource
3104  *   Pointer to tag resource.
 * @param[in, out] dev_flow
3106  *   Pointer to the dev_flow.
3107  * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
3112  */
3113 static int
3114 flow_dv_tag_resource_register
3115                         (struct rte_eth_dev *dev,
3116                          struct mlx5_flow_dv_tag_resource *resource,
3117                          struct mlx5_flow *dev_flow,
3118                          struct rte_flow_error *error)
3119 {
3120         struct mlx5_priv *priv = dev->data->dev_private;
3121         struct mlx5_ibv_shared *sh = priv->sh;
3122         struct mlx5_flow_dv_tag_resource *cache_resource;
3123
3124         /* Lookup a matching resource from cache. */
3125         LIST_FOREACH(cache_resource, &sh->tags, next) {
3126                 if (resource->tag == cache_resource->tag) {
3127                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3128                                 (void *)cache_resource,
3129                                 rte_atomic32_read(&cache_resource->refcnt));
3130                         rte_atomic32_inc(&cache_resource->refcnt);
3131                         dev_flow->flow->tag_resource = cache_resource;
3132                         return 0;
3133                 }
3134         }
        /* Register a new resource. */
3136         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3137         if (!cache_resource)
3138                 return rte_flow_error_set(error, ENOMEM,
3139                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3140                                           "cannot allocate resource memory");
3141         *cache_resource = *resource;
3142         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3143                 (resource->tag);
3144         if (!cache_resource->action) {
3145                 rte_free(cache_resource);
3146                 return rte_flow_error_set(error, ENOMEM,
3147                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3148                                           NULL, "cannot create action");
3149         }
3150         rte_atomic32_init(&cache_resource->refcnt);
3151         rte_atomic32_inc(&cache_resource->refcnt);
3152         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3153         dev_flow->flow->tag_resource = cache_resource;
3154         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3155                 (void *)cache_resource,
3156                 rte_atomic32_read(&cache_resource->refcnt));
3157         return 0;
3158 }
3159
3160 /**
3161  * Release the tag.
3162  *
3163  * @param dev
3164  *   Pointer to Ethernet device.
 * @param tag
 *   Pointer to the tag resource.
3167  *
3168  * @return
3169  *   1 while a reference on it exists, 0 when freed.
3170  */
3171 static int
3172 flow_dv_tag_release(struct rte_eth_dev *dev,
3173                     struct mlx5_flow_dv_tag_resource *tag)
3174 {
3175         assert(tag);
3176         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3177                 dev->data->port_id, (void *)tag,
3178                 rte_atomic32_read(&tag->refcnt));
3179         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3180                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3181                 LIST_REMOVE(tag, next);
3182                 DRV_LOG(DEBUG, "port %u tag %p: removed",
3183                         dev->data->port_id, (void *)tag);
3184                 rte_free(tag);
3185                 return 0;
3186         }
3187         return 1;
3188 }
3189
3190 /**
3191  * Fill the flow with DV spec.
3192  *
3193  * @param[in] dev
3194  *   Pointer to rte_eth_dev structure.
3195  * @param[in, out] dev_flow
3196  *   Pointer to the sub flow.
3197  * @param[in] attr
3198  *   Pointer to the flow attributes.
3199  * @param[in] items
3200  *   Pointer to the list of items.
3201  * @param[in] actions
3202  *   Pointer to the list of actions.
3203  * @param[out] error
3204  *   Pointer to the error structure.
3205  *
3206  * @return
3207  *   0 on success, a negative errno value otherwise and rte_errno is set.
3208  */
3209 static int
3210 flow_dv_translate(struct rte_eth_dev *dev,
3211                   struct mlx5_flow *dev_flow,
3212                   const struct rte_flow_attr *attr,
3213                   const struct rte_flow_item items[],
3214                   const struct rte_flow_action actions[],
3215                   struct rte_flow_error *error)
3216 {
3217         struct mlx5_priv *priv = dev->data->dev_private;
3218         struct rte_flow *flow = dev_flow->flow;
3219         uint64_t item_flags = 0;
3220         uint64_t last_item = 0;
3221         uint64_t action_flags = 0;
3222         uint64_t priority = attr->priority;
3223         struct mlx5_flow_dv_matcher matcher = {
3224                 .mask = {
3225                         .size = sizeof(matcher.mask.buf),
3226                 },
3227         };
3228         int actions_n = 0;
3229         bool actions_end = false;
3230         struct mlx5_flow_dv_modify_hdr_resource res = {
3231                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3232                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3233         };
3234         union flow_dv_attr flow_attr = { .attr = 0 };
3235         struct mlx5_flow_dv_tag_resource tag_resource;
3236
3237         if (priority == MLX5_FLOW_PRIO_RSVD)
3238                 priority = priv->config.flow_prio - 1;
        for (; !actions_end; actions++) {
3240                 const struct rte_flow_action_queue *queue;
3241                 const struct rte_flow_action_rss *rss;
3242                 const struct rte_flow_action *action = actions;
3243                 const struct rte_flow_action_count *count = action->conf;
3244                 const uint8_t *rss_key;
3245                 const struct rte_flow_action_jump *jump_data;
3246                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3247                 struct mlx5_flow_tbl_resource *tbl;
3248
3249                 switch (actions->type) {
3250                 case RTE_FLOW_ACTION_TYPE_VOID:
3251                         break;
3252                 case RTE_FLOW_ACTION_TYPE_FLAG:
3253                         tag_resource.tag =
3254                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3255                         if (!flow->tag_resource)
3256                                 if (flow_dv_tag_resource_register
3257                                     (dev, &tag_resource, dev_flow, error))
                                        return -rte_errno;
3259                         dev_flow->dv.actions[actions_n++] =
3260                                 flow->tag_resource->action;
3261                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3262                         break;
3263                 case RTE_FLOW_ACTION_TYPE_MARK:
3264                         tag_resource.tag = mlx5_flow_mark_set
3265                               (((const struct rte_flow_action_mark *)
3266                                (actions->conf))->id);
3267                         if (!flow->tag_resource)
3268                                 if (flow_dv_tag_resource_register
3269                                     (dev, &tag_resource, dev_flow, error))
                                        return -rte_errno;
3271                         dev_flow->dv.actions[actions_n++] =
3272                                 flow->tag_resource->action;
3273                         action_flags |= MLX5_FLOW_ACTION_MARK;
3274                         break;
3275                 case RTE_FLOW_ACTION_TYPE_DROP:
3276                         action_flags |= MLX5_FLOW_ACTION_DROP;
3277                         break;
3278                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3279                         queue = actions->conf;
3280                         flow->rss.queue_num = 1;
3281                         (*flow->queue)[0] = queue->index;
3282                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3283                         break;
3284                 case RTE_FLOW_ACTION_TYPE_RSS:
3285                         rss = actions->conf;
3286                         if (flow->queue)
3287                                 memcpy((*flow->queue), rss->queue,
3288                                        rss->queue_num * sizeof(uint16_t));
3289                         flow->rss.queue_num = rss->queue_num;
3290                         /* NULL RSS key indicates default RSS key. */
3291                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
3292                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3293                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3294                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3295                         flow->rss.level = rss->level;
3296                         action_flags |= MLX5_FLOW_ACTION_RSS;
3297                         break;
3298                 case RTE_FLOW_ACTION_TYPE_COUNT:
3299                         if (!priv->config.devx) {
3300                                 rte_errno = ENOTSUP;
3301                                 goto cnt_err;
3302                         }
3303                         flow->counter = flow_dv_counter_new(dev, count->shared,
3304                                                             count->id);
3305                         if (flow->counter == NULL)
3306                                 goto cnt_err;
3307                         dev_flow->dv.actions[actions_n++] =
3308                                 flow->counter->action;
3309                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3310                         break;
3311 cnt_err:
3312                         if (rte_errno == ENOTSUP)
3313                                 return rte_flow_error_set
3314                                               (error, ENOTSUP,
3315                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3316                                                NULL,
3317                                                "count action not supported");
3318                         else
3319                                 return rte_flow_error_set
3320                                                 (error, rte_errno,
3321                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3322                                                  action,
3323                                                  "cannot create counter"
3324                                                   " object.");
3325                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3326                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3327                         if (flow_dv_create_action_l2_encap(dev, actions,
3328                                                            dev_flow, error))
3329                                 return -rte_errno;
3330                         dev_flow->dv.actions[actions_n++] =
3331                                 dev_flow->dv.encap_decap->verbs_action;
3332                         action_flags |= actions->type ==
3333                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3334                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3335                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3336                         break;
3337                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3338                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3339                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
3340                                                            error))
3341                                 return -rte_errno;
3342                         dev_flow->dv.actions[actions_n++] =
3343                                 dev_flow->dv.encap_decap->verbs_action;
3344                         action_flags |= actions->type ==
3345                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3346                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3347                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3348                         break;
3349                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3350                         /* Handle encap with preceding decap. */
3351                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3352                                 if (flow_dv_create_action_raw_encap
3353                                         (dev, actions, dev_flow, attr, error))
3354                                         return -rte_errno;
3355                                 dev_flow->dv.actions[actions_n++] =
3356                                         dev_flow->dv.encap_decap->verbs_action;
3357                         } else {
3358                                 /* Handle encap without preceding decap. */
3359                                 if (flow_dv_create_action_l2_encap(dev, actions,
3360                                                                    dev_flow,
3361                                                                    error))
3362                                         return -rte_errno;
3363                                 dev_flow->dv.actions[actions_n++] =
3364                                         dev_flow->dv.encap_decap->verbs_action;
3365                         }
3366                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3367                         break;
3368                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3369                         /* Check if this decap is followed by encap. */
3370                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3371                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3372                                action++) {
3373                         }
3374                         /* Handle decap only if it isn't followed by encap. */
3375                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3376                                 if (flow_dv_create_action_l2_decap(dev,
3377                                                                    dev_flow,
3378                                                                    error))
3379                                         return -rte_errno;
3380                                 dev_flow->dv.actions[actions_n++] =
3381                                         dev_flow->dv.encap_decap->verbs_action;
3382                         }
3383                         /* If decap is followed by encap, handle it at encap. */
3384                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3385                         break;
3386                 case RTE_FLOW_ACTION_TYPE_JUMP:
3387                         jump_data = action->conf;
3388                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3389                                                        MLX5_GROUP_FACTOR,
3390                                                        attr->egress, error);
3391                         if (!tbl)
3392                                 return rte_flow_error_set
3393                                                 (error, errno,
3394                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3395                                                  NULL,
3396                                                  "cannot create jump action.");
3397                         jump_tbl_resource.tbl = tbl;
3398                         if (flow_dv_jump_tbl_resource_register
3399                             (dev, &jump_tbl_resource, dev_flow, error)) {
3400                                 flow_dv_tbl_resource_release(tbl);
3401                                 return rte_flow_error_set
3402                                                 (error, errno,
3403                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3404                                                  NULL,
3405                                                  "cannot create jump action.");
3406                         }
3407                         dev_flow->dv.actions[actions_n++] =
3408                                 dev_flow->dv.jump->action;
3409                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3410                         break;
3411                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3412                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3413                         if (flow_dv_convert_action_modify_mac(&res, actions,
3414                                                               error))
3415                                 return -rte_errno;
3416                         action_flags |= actions->type ==
3417                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3418                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
3419                                         MLX5_FLOW_ACTION_SET_MAC_DST;
3420                         break;
3421                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3422                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3423                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
3424                                                                error))
3425                                 return -rte_errno;
3426                         action_flags |= actions->type ==
3427                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3428                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
3429                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
3430                         break;
3431                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3432                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3433                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
3434                                                                error))
3435                                 return -rte_errno;
3436                         action_flags |= actions->type ==
3437                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3438                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
3439                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
3440                         break;
3441                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3442                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3443                         if (flow_dv_convert_action_modify_tp(&res, actions,
3444                                                              items, &flow_attr,
3445                                                              error))
3446                                 return -rte_errno;
3447                         action_flags |= actions->type ==
3448                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3449                                         MLX5_FLOW_ACTION_SET_TP_SRC :
3450                                         MLX5_FLOW_ACTION_SET_TP_DST;
3451                         break;
3452                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3453                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3454                                                                   &flow_attr,
3455                                                                   error))
3456                                 return -rte_errno;
3457                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3458                         break;
3459                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3460                         if (flow_dv_convert_action_modify_ttl(&res, actions,
3461                                                              items, &flow_attr,
3462                                                              error))
3463                                 return -rte_errno;
3464                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3465                         break;
3466                 case RTE_FLOW_ACTION_TYPE_END:
3467                         actions_end = true;
3468                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3469                                 /* create modify action if needed. */
3470                                 if (flow_dv_modify_hdr_resource_register
3471                                                                 (dev, &res,
3472                                                                  dev_flow,
3473                                                                  error))
3474                                         return -rte_errno;
3475                                 dev_flow->dv.actions[actions_n++] =
3476                                         dev_flow->dv.modify_hdr->verbs_action;
3477                         }
3478                         break;
3479                 default:
3480                         break;
3481                 }
3482         }
3483         dev_flow->dv.actions_n = actions_n;
3484         flow->actions = action_flags;
3485         if (attr->ingress && !attr->transfer &&
3486             (priv->representor || priv->master)) {
                /* It was validated - we support unidirectional flows only. */
3488                 assert(!attr->egress);
3489                 /*
3490                  * Add matching on source vport index only
3491                  * for ingress rules in E-Switch configurations.
3492                  */
3493                 flow_dv_translate_source_vport(matcher.mask.buf,
3494                                                dev_flow->dv.value.buf,
3495                                                priv->vport_id,
3496                                                0xffff);
3497         }
3498         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3499                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3500                 void *match_mask = matcher.mask.buf;
3501                 void *match_value = dev_flow->dv.value.buf;
3502
3503                 switch (items->type) {
3504                 case RTE_FLOW_ITEM_TYPE_ETH:
3505                         flow_dv_translate_item_eth(match_mask, match_value,
3506                                                    items, tunnel);
3507                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3508                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3509                                              MLX5_FLOW_LAYER_OUTER_L2;
3510                         break;
3511                 case RTE_FLOW_ITEM_TYPE_VLAN:
3512                         flow_dv_translate_item_vlan(match_mask, match_value,
3513                                                     items, tunnel);
3514                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3515                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3516                                               MLX5_FLOW_LAYER_INNER_VLAN) :
3517                                              (MLX5_FLOW_LAYER_OUTER_L2 |
3518                                               MLX5_FLOW_LAYER_OUTER_VLAN);
3519                         break;
3520                 case RTE_FLOW_ITEM_TYPE_IPV4:
3521                         flow_dv_translate_item_ipv4(match_mask, match_value,
3522                                                     items, tunnel, attr->group);
3523                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3524                         dev_flow->dv.hash_fields |=
3525                                 mlx5_flow_hashfields_adjust
3526                                         (dev_flow, tunnel,
3527                                          MLX5_IPV4_LAYER_TYPES,
3528                                          MLX5_IPV4_IBV_RX_HASH);
3529                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3530                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3531                         break;
3532                 case RTE_FLOW_ITEM_TYPE_IPV6:
3533                         flow_dv_translate_item_ipv6(match_mask, match_value,
3534                                                     items, tunnel, attr->group);
3535                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3536                         dev_flow->dv.hash_fields |=
3537                                 mlx5_flow_hashfields_adjust
3538                                         (dev_flow, tunnel,
3539                                          MLX5_IPV6_LAYER_TYPES,
3540                                          MLX5_IPV6_IBV_RX_HASH);
3541                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3542                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3543                         break;
3544                 case RTE_FLOW_ITEM_TYPE_TCP:
3545                         flow_dv_translate_item_tcp(match_mask, match_value,
3546                                                    items, tunnel);
3547                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3548                         dev_flow->dv.hash_fields |=
3549                                 mlx5_flow_hashfields_adjust
3550                                         (dev_flow, tunnel, ETH_RSS_TCP,
3551                                          IBV_RX_HASH_SRC_PORT_TCP |
3552                                          IBV_RX_HASH_DST_PORT_TCP);
3553                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3554                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3555                         break;
3556                 case RTE_FLOW_ITEM_TYPE_UDP:
3557                         flow_dv_translate_item_udp(match_mask, match_value,
3558                                                    items, tunnel);
3559                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3560                         dev_flow->dv.hash_fields |=
3561                                 mlx5_flow_hashfields_adjust
3562                                         (dev_flow, tunnel, ETH_RSS_UDP,
3563                                          IBV_RX_HASH_SRC_PORT_UDP |
3564                                          IBV_RX_HASH_DST_PORT_UDP);
3565                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3566                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3567                         break;
3568                 case RTE_FLOW_ITEM_TYPE_GRE:
3569                         flow_dv_translate_item_gre(match_mask, match_value,
3570                                                    items, tunnel);
3571                         last_item = MLX5_FLOW_LAYER_GRE;
3572                         break;
3573                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3574                         flow_dv_translate_item_nvgre(match_mask, match_value,
3575                                                      items, tunnel);
3576                         last_item = MLX5_FLOW_LAYER_GRE;
3577                         break;
3578                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3579                         flow_dv_translate_item_vxlan(match_mask, match_value,
3580                                                      items, tunnel);
3581                         last_item = MLX5_FLOW_LAYER_VXLAN;
3582                         break;
3583                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3584                         flow_dv_translate_item_vxlan(match_mask, match_value,
3585                                                      items, tunnel);
3586                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3587                         break;
3588                 case RTE_FLOW_ITEM_TYPE_MPLS:
3589                         flow_dv_translate_item_mpls(match_mask, match_value,
3590                                                     items, last_item, tunnel);
3591                         last_item = MLX5_FLOW_LAYER_MPLS;
3592                         break;
3593                 case RTE_FLOW_ITEM_TYPE_META:
3594                         flow_dv_translate_item_meta(match_mask, match_value,
3595                                                     items);
3596                         last_item = MLX5_FLOW_ITEM_METADATA;
3597                         break;
3598                 default:
3599                         break;
3600                 }
3601                 item_flags |= last_item;
3602         }
3603         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3604                                          dev_flow->dv.value.buf));
3605         dev_flow->layers = item_flags;
3606         /* Register matcher. */
3607         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3608                                     matcher.mask.size);
3609         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3610                                                      matcher.priority);
3611         matcher.egress = attr->egress;
3612         matcher.group = attr->group;
3613         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
3614                 return -rte_errno;
3615         return 0;
3616 }
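
/*
 * End-to-end sketch (hypothetical application code, not part of the
 * PMD): a rule that the translation above turns into one matcher plus
 * a tag (MARK) action and a queue destination.
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_mark mark = { .id = 0xbef };
 *      struct rte_flow_action_queue queue = { .index = 3 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */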
3617
3618 /**
3619  * Apply the flow to the NIC.
3620  *
3621  * @param[in] dev
3622  *   Pointer to the Ethernet device structure.
3623  * @param[in, out] flow
3624  *   Pointer to flow structure.
3625  * @param[out] error
3626  *   Pointer to error structure.
3627  *
3628  * @return
3629  *   0 on success, a negative errno value otherwise and rte_errno is set.
3630  */
3631 static int
3632 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3633               struct rte_flow_error *error)
3634 {
3635         struct mlx5_flow_dv *dv;
3636         struct mlx5_flow *dev_flow;
3637         int n;
3638         int err;
3639
3640         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3641                 dv = &dev_flow->dv;
3642                 n = dv->actions_n;
3643                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3644                         dv->hrxq = mlx5_hrxq_drop_new(dev);
3645                         if (!dv->hrxq) {
3646                                 rte_flow_error_set
3647                                         (error, errno,
3648                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3649                                          "cannot get drop hash queue");
3650                                 goto error;
3651                         }
3652                         dv->actions[n++] =
3653                                 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3654                                 (dv->hrxq->qp);
3655                 } else if (flow->actions &
3656                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3657                         struct mlx5_hrxq *hrxq;
3658
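                             /*
                              * Reuse an existing hash Rx queue matching
                              * these RSS parameters when possible; create
                              * a new one only on a cache miss.
                              */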
3659                         hrxq = mlx5_hrxq_get(dev, flow->key,
3660                                              MLX5_RSS_HASH_KEY_LEN,
3661                                              dv->hash_fields,
3662                                              (*flow->queue),
3663                                              flow->rss.queue_num);
3664                         if (!hrxq)
3665                                 hrxq = mlx5_hrxq_new
3666                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3667                                          dv->hash_fields, (*flow->queue),
3668                                          flow->rss.queue_num,
3669                                          !!(dev_flow->layers &
3670                                             MLX5_FLOW_LAYER_TUNNEL));
3671                         if (!hrxq) {
3672                                 rte_flow_error_set
3673                                         (error, rte_errno,
3674                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3675                                          "cannot get hash queue");
3676                                 goto error;
3677                         }
3678                         dv->hrxq = hrxq;
3679                         dv->actions[n++] =
3680                                 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3681                                 (dv->hrxq->qp);
3682                 }
3683                 dv->flow =
3684                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3685                                                   (void *)&dv->value, n,
3686                                                   dv->actions);
3687                 if (!dv->flow) {
3688                         rte_flow_error_set(error, errno,
3689                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3690                                            NULL,
3691                                            "hardware refuses to create flow");
3692                         goto error;
3693                 }
3694         }
3695         return 0;
3696 error:
3697         err = rte_errno; /* Save rte_errno before cleanup. */
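             /*
              * Only the queue references taken above are dropped here;
              * the caller is expected to release the verbs flows already
              * created via flow_dv_destroy()/flow_dv_remove().
              */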
3698         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3699                 struct mlx5_flow_dv *dv = &dev_flow->dv;
3700                 if (dv->hrxq) {
3701                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
3702                                 mlx5_hrxq_drop_release(dev);
3703                         else
3704                                 mlx5_hrxq_release(dev, dv->hrxq);
3705                         dv->hrxq = NULL;
3706                 }
3707         }
3708         rte_errno = err; /* Restore rte_errno. */
3709         return -rte_errno;
3710 }
3711
3712 /**
3713  * Release the flow matcher.
3714  *
3715  * @param dev
3716  *   Pointer to Ethernet device.
3717  * @param flow
3718  *   Pointer to mlx5_flow.
3719  *
3720  * @return
3721  *   1 while a reference on it exists, 0 when freed.
3722  */
3723 static int
3724 flow_dv_matcher_release(struct rte_eth_dev *dev,
3725                         struct mlx5_flow *flow)
3726 {
3727         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3728         struct mlx5_priv *priv = dev->data->dev_private;
3729         struct mlx5_ibv_shared *sh = priv->sh;
3730         struct mlx5_flow_tbl_resource *tbl;
3731
3732         assert(matcher->matcher_object);
3733         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3734                 dev->data->port_id, (void *)matcher,
3735                 rte_atomic32_read(&matcher->refcnt));
3736         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3737                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3738                            (matcher->matcher_object));
3739                 LIST_REMOVE(matcher, next);
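                     /*
                      * A matcher holds a reference on its flow table
                      * resource; drop it now that the matcher is gone.
                      */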
3740                 if (matcher->egress)
3741                         tbl = &sh->tx_tbl[matcher->group];
3742                 else
3743                         tbl = &sh->rx_tbl[matcher->group];
3744                 flow_dv_tbl_resource_release(tbl);
3745                 rte_free(matcher);
3746                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3747                         dev->data->port_id, (void *)matcher);
3748                 return 0;
3749         }
3750         return 1;
3751 }
3752
3753 /**
3754  * Release an encap/decap resource.
3755  *
3756  * @param flow
3757  *   Pointer to mlx5_flow.
3758  *
3759  * @return
3760  *   1 while a reference on it exists, 0 when freed.
3761  */
3762 static int
3763 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3764 {
3765         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3766                                                 flow->dv.encap_decap;
3767
3768         assert(cache_resource->verbs_action);
3769         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3770                 (void *)cache_resource,
3771                 rte_atomic32_read(&cache_resource->refcnt));
3772         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3773                 claim_zero(mlx5_glue->destroy_flow_action
3774                                 (cache_resource->verbs_action));
3775                 LIST_REMOVE(cache_resource, next);
3776                 rte_free(cache_resource);
3777                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3778                         (void *)cache_resource);
3779                 return 0;
3780         }
3781         return 1;
3782 }
3783
3784 /**
3785  * Release a jump to table action resource.
3786  *
3787  * @param flow
3788  *   Pointer to mlx5_flow.
3789  *
3790  * @return
3791  *   1 while a reference on it exists, 0 when freed.
3792  */
3793 static int
3794 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
3795 {
3796         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
3797                                                 flow->dv.jump;
3798
3799         assert(cache_resource->action);
3800         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
3801                 (void *)cache_resource,
3802                 rte_atomic32_read(&cache_resource->refcnt));
3803         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3804                 claim_zero(mlx5_glue->destroy_flow_action
3805                                 (cache_resource->action));
3806                 LIST_REMOVE(cache_resource, next);
3807                 flow_dv_tbl_resource_release(cache_resource->tbl);
3808                 rte_free(cache_resource);
3809                 DRV_LOG(DEBUG, "jump table resource %p: removed",
3810                         (void *)cache_resource);
3811                 return 0;
3812         }
3813         return 1;
3814 }
3815
3816 /**
3817  * Release a modify-header resource.
3818  *
3819  * @param flow
3820  *   Pointer to mlx5_flow.
3821  *
3822  * @return
3823  *   1 while a reference on it exists, 0 when freed.
3824  */
3825 static int
3826 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3827 {
3828         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3829                                                 flow->dv.modify_hdr;
3830
3831         assert(cache_resource->verbs_action);
3832         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3833                 (void *)cache_resource,
3834                 rte_atomic32_read(&cache_resource->refcnt));
3835         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3836                 claim_zero(mlx5_glue->destroy_flow_action
3837                                 (cache_resource->verbs_action));
3838                 LIST_REMOVE(cache_resource, next);
3839                 rte_free(cache_resource);
3840                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3841                         (void *)cache_resource);
3842                 return 0;
3843         }
3844         return 1;
3845 }
3846
3847 /**
3848  * Remove the flow from the NIC but keep it in memory.
3849  *
3850  * @param[in] dev
3851  *   Pointer to Ethernet device.
3852  * @param[in, out] flow
3853  *   Pointer to flow structure.
3854  */
3855 static void
3856 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3857 {
3858         struct mlx5_flow_dv *dv;
3859         struct mlx5_flow *dev_flow;
3860
3861         if (!flow)
3862                 return;
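             /*
              * Destroy only the hardware objects: the flow handles and
              * the queue references. Matchers and other cached resources
              * stay allocated so the flow can be re-applied later.
              */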
3863         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3864                 dv = &dev_flow->dv;
3865                 if (dv->flow) {
3866                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
3867                         dv->flow = NULL;
3868                 }
3869                 if (dv->hrxq) {
3870                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
3871                                 mlx5_hrxq_drop_release(dev);
3872                         else
3873                                 mlx5_hrxq_release(dev, dv->hrxq);
3874                         dv->hrxq = NULL;
3875                 }
3876         }
3877 }
3878
3879 /**
3880  * Remove the flow from the NIC and from memory.
3881  *
3882  * @param[in] dev
3883  *   Pointer to the Ethernet device structure.
3884  * @param[in, out] flow
3885  *   Pointer to flow structure.
3886  */
3887 static void
3888 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3889 {
3890         struct mlx5_flow *dev_flow;
3891
3892         if (!flow)
3893                 return;
3894         flow_dv_remove(dev, flow);
3895         if (flow->counter) {
3896                 flow_dv_counter_release(flow->counter);
3897                 flow->counter = NULL;
3898         }
3899         if (flow->tag_resource) {
3900                 flow_dv_tag_release(dev, flow->tag_resource);
3901                 flow->tag_resource = NULL;
3902         }
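             /*
              * Unlink every device sub-flow and drop its references on
              * the shared cached resources (matcher, encap/decap, modify
              * header, jump table) before freeing it.
              */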
3903         while (!LIST_EMPTY(&flow->dev_flows)) {
3904                 dev_flow = LIST_FIRST(&flow->dev_flows);
3905                 LIST_REMOVE(dev_flow, next);
3906                 if (dev_flow->dv.matcher)
3907                         flow_dv_matcher_release(dev, dev_flow);
3908                 if (dev_flow->dv.encap_decap)
3909                         flow_dv_encap_decap_resource_release(dev_flow);
3910                 if (dev_flow->dv.modify_hdr)
3911                         flow_dv_modify_hdr_resource_release(dev_flow);
3912                 if (dev_flow->dv.jump)
3913                         flow_dv_jump_tbl_resource_release(dev_flow);
3914                 rte_free(dev_flow);
3915         }
3916 }
3917
3918 /**
3919  * Query a DV flow rule for its statistics via DevX.
3920  *
3921  * @param[in] dev
3922  *   Pointer to Ethernet device.
3923  * @param[in] flow
3924  *   Pointer to the flow.
3925  * @param[out] data
3926  *   Data retrieved by the query.
3927  * @param[out] error
3928  *   Perform verbose error reporting if not NULL.
3929  *
3930  * @return
3931  *   0 on success, a negative errno value otherwise and rte_errno is set.
3932  */
3933 static int
3934 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
3935                     void *data, struct rte_flow_error *error)
3936 {
3937         struct mlx5_priv *priv = dev->data->dev_private;
3938         struct rte_flow_query_count *qc = data;
3939         uint64_t pkts = 0;
3940         uint64_t bytes = 0;
3941         int err;
3942
3943         if (!priv->config.devx)
3944                 return rte_flow_error_set(error, ENOTSUP,
3945                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3946                                           NULL,
3947                                           "counters are not supported");
3948         if (flow->counter) {
3949                 err = mlx5_devx_cmd_flow_counter_query
3950                                                 (flow->counter->dcs,
3951                                                  qc->reset, &pkts, &bytes);
3952                 if (err)
3953                         return rte_flow_error_set
3954                                 (error, err,
3955                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3956                                  NULL,
3957                                  "cannot read counters");
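                     /*
                      * The device reports absolute values; return the
                      * delta against the values saved at the last reset.
                      */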
3958                 qc->hits_set = 1;
3959                 qc->bytes_set = 1;
3960                 qc->hits = pkts - flow->counter->hits;
3961                 qc->bytes = bytes - flow->counter->bytes;
3962                 if (qc->reset) {
3963                         flow->counter->hits = pkts;
3964                         flow->counter->bytes = bytes;
3965                 }
3966                 return 0;
3967         }
3968         return rte_flow_error_set(error, EINVAL,
3969                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3970                                   NULL,
3971                                   "counters are not available");
3972 }
3973
3974 /**
3975  * Query a flow.
3976  *
3977  * @see rte_flow_query()
3978  * @see rte_flow_ops
3979  */
3980 static int
3981 flow_dv_query(struct rte_eth_dev *dev,
3982               struct rte_flow *flow,
3983               const struct rte_flow_action *actions,
3984               void *data,
3985               struct rte_flow_error *error)
3986 {
3987         int ret = -EINVAL;
3988
3989         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3990                 switch (actions->type) {
3991                 case RTE_FLOW_ACTION_TYPE_VOID:
3992                         break;
3993                 case RTE_FLOW_ACTION_TYPE_COUNT:
3994                         ret = flow_dv_query_count(dev, flow, data, error);
3995                         break;
3996                 default:
3997                         return rte_flow_error_set(error, ENOTSUP,
3998                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3999                                                   actions,
4000                                                   "action not supported");
4001                 }
4002         }
4003         return ret;
4004 }
4005
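     /*
      * DV matchers, flow tables and related resources live in the
      * mlx5_ibv_shared context, which can be shared by several ports of
      * the same device. The thunks below therefore serialize every
      * operation that may touch those shared structures with
      * flow_d_shared_lock().
      */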
4006 /*
4007  * Mutex-protected thunk to flow_dv_translate().
4008  */
4009 static int
4010 flow_d_translate(struct rte_eth_dev *dev,
4011                  struct mlx5_flow *dev_flow,
4012                  const struct rte_flow_attr *attr,
4013                  const struct rte_flow_item items[],
4014                  const struct rte_flow_action actions[],
4015                  struct rte_flow_error *error)
4016 {
4017         int ret;
4018
4019         flow_d_shared_lock(dev);
4020         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4021         flow_d_shared_unlock(dev);
4022         return ret;
4023 }
4024
4025 /*
4026  * Mutex-protected thunk to flow_dv_apply().
4027  */
4028 static int
4029 flow_d_apply(struct rte_eth_dev *dev,
4030              struct rte_flow *flow,
4031              struct rte_flow_error *error)
4032 {
4033         int ret;
4034
4035         flow_d_shared_lock(dev);
4036         ret = flow_dv_apply(dev, flow, error);
4037         flow_d_shared_unlock(dev);
4038         return ret;
4039 }
4040
4041 /*
4042  * Mutex-protected thunk to flow_dv_remove().
4043  */
4044 static void
4045 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4046 {
4047         flow_d_shared_lock(dev);
4048         flow_dv_remove(dev, flow);
4049         flow_d_shared_unlock(dev);
4050 }
4051
4052 /*
4053  * Mutex-protected thunk to flow_dv_destroy().
4054  */
4055 static void
4056 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4057 {
4058         flow_d_shared_lock(dev);
4059         flow_dv_destroy(dev, flow);
4060         flow_d_shared_unlock(dev);
4061 }
4062
4063 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
4064         .validate = flow_dv_validate,
4065         .prepare = flow_dv_prepare,
4066         .translate = flow_d_translate,
4067         .apply = flow_d_apply,
4068         .remove = flow_d_remove,
4069         .destroy = flow_d_destroy,
4070         .query = flow_dv_query,
4071 };
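     /*
      * These callbacks are not invoked directly: mlx5_flow.c selects this
      * table for flows attributed to the DV driver type and dispatches
      * through it. Roughly (a simplified sketch, not the verbatim caller
      * code):
      *
      *     const struct mlx5_flow_driver_ops *fops =
      *             flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
      *
      *     ret = fops->translate(dev, dev_flow, attr, items, actions,
      *                           error);
      */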
4072
4073 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */