net/mlx5: fix recursive inclusion of header file
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        attr->ipv4 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        attr->ipv6 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
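
/*
 * Illustrative usage sketch (not part of the upstream driver): callers
 * classify the pattern once and then branch on the protocol bits.  The
 * unnamed struct/union pair above allows clearing every flag with a
 * single store through the .attr alias:
 *
 *     union flow_dv_attr attr = { .attr = 0 };
 *
 *     flow_dv_attr_init(items, &attr);
 *     if (attr.udp)
 *             (build the UDP variant of the modification)
 *     else if (attr.tcp)
 *             (build the TCP variant of the modification)
 */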

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {0, 0, 0},
};
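
/*
 * Reading the tables above: each entry maps a byte range of the wire
 * header to the hardware field that rewrites it.  For example,
 * {2, 0, MLX5_MODI_OUT_UDP_SPORT} in modify_udp means "the 2 bytes at
 * offset 0 of the UDP header (the source port) are rewritten through
 * MLX5_MODI_OUT_UDP_SPORT".  A table for another protocol (a sketch,
 * the field id below is hypothetical) would follow the same shape and
 * must end with the {0, 0, 0} sentinel that terminates the conversion
 * loop:
 *
 *     struct field_modify_info modify_foo[] = {
 *             {2, 0, MLX5_MODI_OUT_FOO_FIELD},
 *             {0, 0, 0},
 *     };
 */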

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to the shared DV context. Locking occurs only if the context is
 * actually shared, i.e. we have a multiport IB device and representors
 * are created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}
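
/*
 * Usage sketch, assuming the flow create/destroy paths elsewhere in
 * this file bracket their DV object manipulation like so:
 *
 *     flow_d_shared_lock(dev);
 *     (create or destroy DV flow resources)
 *     flow_d_shared_unlock(dev);
 *
 * The lock is skipped when dv_refcnt <= 1: a single user of the shared
 * context implies no concurrent representor access.
 */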

/**
 * Convert modify-header action to DV specification.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type,
                              struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        const uint8_t *spec = item->spec;
        const uint8_t *mask = item->mask;
        uint32_t set;

        while (field->size) {
                set = 0;
                /* Generate modify command for each mask segment. */
                memcpy(&set, &mask[field->offset], field->size);
                if (set) {
                        if (i >= MLX5_MODIFY_NUM)
                                return rte_flow_error_set(error, EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                         "too many items to modify");
                        actions[i].action_type = type;
                        actions[i].field = field->id;
                        actions[i].length =
                                field->size == 4 ? 0 : field->size * 8;
                        rte_memcpy(&actions[i].data[4 - field->size],
                                   &spec[field->offset], field->size);
                        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                        ++i;
                }
                if (resource->actions_num != i)
                        resource->actions_num = i;
                field++;
        }
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}
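
/*
 * Worked example (illustrative): setting a UDP source port with
 * modify_udp as the field table and a 0xffff mask at offset 0 emits a
 * single command:
 *
 *     action_type = MLX5_MODIFICATION_TYPE_SET
 *     field       = MLX5_MODI_OUT_UDP_SPORT
 *     length      = 16    (2-byte field => 2 * 8 bits)
 *     data        = new port, right-aligned in the 4-byte data window
 *
 * A 4-byte field is encoded with length 0, the device convention for a
 * full 32-bit write.
 */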

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
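
/*
 * Caller-side sketch (hypothetical values): a SET_IPV4_SRC action that
 * this routine turns into one SET command on MLX5_MODI_OUT_SIPV4,
 * rewriting the source address to 192.168.1.1:
 *
 *     struct rte_flow_action_set_ipv4 conf = {
 *             .ipv4_addr = RTE_BE32(0xc0a80101),
 *     };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *             .conf = &conf,
 *     };
 *
 *     flow_dv_convert_action_modify_ipv4(resource, &action, error);
 */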

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        }
        if (attr->tcp) {
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
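
/*
 * Note: flow_dv_attr_init() is assumed to set exactly one of attr->udp
 * and attr->tcp for a valid set-TP flow (validation rejects patterns
 * without an L4 header), so item and field are always assigned before
 * the conversion call above.
 */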

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
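
/*
 * The decrement is expressed as an addition: on the 8-bit TTL and hop
 * limit fields, adding 0xFF modulo 256 equals subtracting one, which
 * is why the routine above uses MLX5_MODIFICATION_TYPE_ADD with a spec
 * value of 0xFF instead of a SET.
 */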

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_meta *spec = item->spec;
        const struct rte_flow_item_meta *mask = item->mask;
        const struct rte_flow_item_meta nic_mask = {
                .data = RTE_BE32(UINT32_MAX)
        };
        int ret;
        uint64_t offloads = dev->data->dev_conf.txmode.offloads;

        if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
                return rte_flow_error_set(error, EPERM,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on metadata offload "
                                          "configuration is off for this port");
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (!spec->data)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          NULL,
                                          "data cannot be zero");
        if (!mask)
                mask = &rte_flow_item_meta_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        error);
        if (ret < 0)
                return ret;
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "pattern not supported for ingress");
        return 0;
}
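
/*
 * Example pattern accepted by the checks above (egress attribute,
 * non-zero data, DEV_TX_OFFLOAD_MATCH_METADATA enabled) -- a sketch
 * with an arbitrary metadata value:
 *
 *     struct rte_flow_item_meta spec = { .data = RTE_BE32(0xcafe) };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_META,
 *             .spec = &spec,
 *             .mask = &rte_flow_item_meta_mask,
 *     };
 */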

/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (!priv->config.devx)
                goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
        return 0;
#endif
notsup_err:
        return rte_flow_error_set
                      (error, ENOTSUP,
                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                       NULL,
                       "count action not supported");
}

/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "decap action not supported for "
                                          "egress");
        return 0;
}

/**
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap(uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap"
                                          " action in a flow");
        /* encap without preceding decap is not supported for ingress */
        if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the decap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_decap(uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have encap action before"
                                          " decap action");
        if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single decap"
                                          " action in a flow");
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        /* decap action is valid on egress only if it is followed by encap */
        if (attr->egress) {
                for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
                       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
                       action++) {
                }
                if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
                        return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                         NULL, "decap action not supported"
                                         " for egress");
        }
        return 0;
}

/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_encap_decap_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;
        struct rte_flow *flow = dev_flow->flow;
        struct mlx5dv_dr_ns *ns;

        resource->flags = flow->group ? 0 : 1;
        if (flow->ingress)
                ns = sh->rx_ns;
        else
                ns = sh->tx_ns;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
                if (resource->reformat_type == cache_resource->reformat_type &&
                    resource->ft_type == cache_resource->ft_type &&
                    resource->flags == cache_resource->flags &&
                    resource->size == cache_resource->size &&
                    !memcmp((const void *)resource->buf,
                            (const void *)cache_resource->buf,
                            resource->size)) {
                        DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.encap_decap = cache_resource;
                        return 0;
                }
        }
        /* Register new encap/decap resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->verbs_action =
                mlx5_glue->dv_create_flow_action_packet_reformat
                        (sh->ctx, cache_resource->reformat_type,
                         cache_resource->ft_type, ns, cache_resource->flags,
                         cache_resource->size,
                         (cache_resource->size ? cache_resource->buf : NULL));
        if (!cache_resource->verbs_action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
        dev_flow->dv.encap_decap = cache_resource;
        DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}
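
/*
 * Reference-counting pattern used above: a cache hit only bumps the
 * counter; a miss publishes a new entry holding the first reference
 * (rte_atomic32_init() zeroes refcnt, rte_atomic32_inc() raises it to
 * one).  The release path is expected to decrement and destroy the
 * verbs action once the count drops to zero.
 */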

/**
 * Find existing table jump resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to jump table resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_jump_tbl_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_jump_tbl_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_jump_tbl_resource *cache_resource;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
                if (resource->tbl == cache_resource->tbl) {
                        DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.jump = cache_resource;
                        return 0;
                }
        }
        /* Register new jump table resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->action =
                mlx5_glue->dr_create_flow_action_dest_flow_tbl
                (resource->tbl->obj);
        if (!cache_resource->action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
        dev_flow->dv.jump = cache_resource;
        DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}

/**
 * Get the size of a specific rte_flow_item_type.
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   Size of the matching rte_flow_item_* struct, 0 if void or irrelevant.
 */
static size_t
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
{
        size_t retval;

        switch (item_type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                retval = sizeof(struct rte_flow_item_eth);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                retval = sizeof(struct rte_flow_item_vlan);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                retval = sizeof(struct rte_flow_item_ipv4);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                retval = sizeof(struct rte_flow_item_ipv6);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                retval = sizeof(struct rte_flow_item_udp);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                retval = sizeof(struct rte_flow_item_tcp);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
                retval = sizeof(struct rte_flow_item_vxlan);
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
                retval = sizeof(struct rte_flow_item_gre);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                retval = sizeof(struct rte_flow_item_nvgre);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                retval = sizeof(struct rte_flow_item_vxlan_gpe);
                break;
        case RTE_FLOW_ITEM_TYPE_MPLS:
                retval = sizeof(struct rte_flow_item_mpls);
                break;
        case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
        default:
                retval = 0;
                break;
        }
        return retval;
}
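
/*
 * The sizes above double as wire-header lengths: for every supported
 * type the rte_flow_item_* struct mirrors the protocol header byte for
 * byte (e.g. sizeof(struct rte_flow_item_udp) == 8), which is what
 * allows flow_dv_convert_encap_data() below to memcpy item specs
 * straight into the reformat buffer.
 */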

#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04

/**
 * Convert the encap action data from a list of rte_flow_item objects
 * to a raw buffer.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                           size_t *size, struct rte_flow_error *error)
{
        struct ether_hdr *eth = NULL;
        struct vlan_hdr *vlan = NULL;
        struct ipv4_hdr *ipv4 = NULL;
        struct ipv6_hdr *ipv6 = NULL;
        struct udp_hdr *udp = NULL;
        struct vxlan_hdr *vxlan = NULL;
        struct vxlan_gpe_hdr *vxlan_gpe = NULL;
        struct gre_hdr *gre = NULL;
        size_t len;
        size_t temp_size = 0;

        if (!items)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "invalid empty data");
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                len = flow_dv_get_item_len(items->type);
                if (len + temp_size > MLX5_ENCAP_MAX_LEN)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "items total size is too big"
                                                  " for encap action");
                rte_memcpy((void *)&buf[temp_size], items->spec, len);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth = (struct ether_hdr *)&buf[temp_size];
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan = (struct vlan_hdr *)&buf[temp_size];
                        if (!eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "eth header not found");
                        if (!eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4 = (struct ipv4_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
                        if (!ipv4->version_ihl)
                                ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
                                                    MLX5_ENCAP_IPV4_IHL_MIN;
                        if (!ipv4->time_to_live)
                                ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6 = (struct ipv6_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
                        if (!ipv6->vtc_flow)
                                ipv6->vtc_flow =
                                        RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
                        if (!ipv6->hop_limits)
                                ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp = (struct udp_hdr *)&buf[temp_size];
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_UDP;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan = (struct vxlan_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        if (!udp->dst_port)
                                udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
                        if (!vxlan->vx_flags)
                                vxlan->vx_flags =
                                        RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        if (!vxlan_gpe->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!udp->dst_port)
                                udp->dst_port =
                                        RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
                        if (!vxlan_gpe->vx_flags)
                                vxlan_gpe->vx_flags =
                                                MLX5_ENCAP_VXLAN_GPE_FLAGS;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        gre = (struct gre_hdr *)&buf[temp_size];
                        if (!gre->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_GRE;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "unsupported item type");
                        break;
                }
                temp_size += len;
        }
        *size = temp_size;
        return 0;
}
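
/*
 * Example (hypothetical values): an eth / ipv4 / udp / vxlan / END
 * chain flattens into a 14 + 20 + 8 + 8 = 50 byte reformat buffer,
 * with zero fields defaulted as coded above: EtherType and IP protocol
 * filled per the next header, UDP destination port set to
 * MLX5_UDP_PORT_VXLAN (4789), and VXLAN flags set to
 * MLX5_ENCAP_VXLAN_FLAGS.
 */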
1205
1206 /**
1207  * Convert L2 encap action to DV specification.
1208  *
1209  * @param[in] dev
1210  *   Pointer to rte_eth_dev structure.
1211  * @param[in] action
1212  *   Pointer to action structure.
1213  * @param[in, out] dev_flow
1214  *   Pointer to the mlx5_flow.
1215  * @param[out] error
1216  *   Pointer to the error structure.
1217  *
1218  * @return
1219  *   0 on success, a negative errno value otherwise and rte_errno is set.
1220  */
1221 static int
1222 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1223                                const struct rte_flow_action *action,
1224                                struct mlx5_flow *dev_flow,
1225                                struct rte_flow_error *error)
1226 {
1227         const struct rte_flow_item *encap_data;
1228         const struct rte_flow_action_raw_encap *raw_encap_data;
1229         struct mlx5_flow_dv_encap_decap_resource res = {
1230                 .reformat_type =
1231                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1232                 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1233         };
1234
1235         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1236                 raw_encap_data =
1237                         (const struct rte_flow_action_raw_encap *)action->conf;
1238                 res.size = raw_encap_data->size;
1239                 memcpy(res.buf, raw_encap_data->data, res.size);
1240         } else {
1241                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1242                         encap_data =
1243                                 ((const struct rte_flow_action_vxlan_encap *)
1244                                                 action->conf)->definition;
1245                 else
1246                         encap_data =
1247                                 ((const struct rte_flow_action_nvgre_encap *)
1248                                                 action->conf)->definition;
1249                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1250                                                &res.size, error))
1251                         return -rte_errno;
1252         }
1253         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1254                 return rte_flow_error_set(error, EINVAL,
1255                                           RTE_FLOW_ERROR_TYPE_ACTION,
1256                                           NULL, "can't create L2 encap action");
1257         return 0;
1258 }
1259
1260 /**
1261  * Convert L2 decap action to DV specification.
1262  *
1263  * @param[in] dev
1264  *   Pointer to rte_eth_dev structure.
1265  * @param[in, out] dev_flow
1266  *   Pointer to the mlx5_flow.
1267  * @param[out] error
1268  *   Pointer to the error structure.
1269  *
1270  * @return
1271  *   0 on success, a negative errno value otherwise and rte_errno is set.
1272  */
1273 static int
1274 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1275                                struct mlx5_flow *dev_flow,
1276                                struct rte_flow_error *error)
1277 {
1278         struct mlx5_flow_dv_encap_decap_resource res = {
1279                 .size = 0,
1280                 .reformat_type =
1281                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1282                 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1283         };
1284
1285         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1286                 return rte_flow_error_set(error, EINVAL,
1287                                           RTE_FLOW_ERROR_TYPE_ACTION,
1288                                           NULL, "can't create L2 decap action");
1289         return 0;
1290 }
1291
1292 /**
1293  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1294  *
1295  * @param[in] dev
1296  *   Pointer to rte_eth_dev structure.
1297  * @param[in] action
1298  *   Pointer to action structure.
1299  * @param[in, out] dev_flow
1300  *   Pointer to the mlx5_flow.
1301  * @param[in] attr
1302  *   Pointer to the flow attributes.
1303  * @param[out] error
1304  *   Pointer to the error structure.
1305  *
1306  * @return
1307  *   0 on success, a negative errno value otherwise and rte_errno is set.
1308  */
1309 static int
1310 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1311                                 const struct rte_flow_action *action,
1312                                 struct mlx5_flow *dev_flow,
1313                                 const struct rte_flow_attr *attr,
1314                                 struct rte_flow_error *error)
1315 {
1316         const struct rte_flow_action_raw_encap *encap_data;
1317         struct mlx5_flow_dv_encap_decap_resource res;
1318
1319         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1320         res.size = encap_data->size;
1321         memcpy(res.buf, encap_data->data, res.size);
1322         res.reformat_type = attr->egress ?
1323                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1324                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1325         res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1326                                      MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1327         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1328                 return rte_flow_error_set(error, EINVAL,
1329                                           RTE_FLOW_ERROR_TYPE_ACTION,
1330                                           NULL, "can't create encap action");
1331         return 0;
1332 }
1333
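/*
 * Usage sketch (illustrative): raw L3 encapsulation on egress, where the
 * application passes prebuilt outer headers; "outer_hdr" and its size are
 * hypothetical placeholders.
 *
 *   uint8_t outer_hdr[38]; (prebuilt Ethernet + IPv4 + GRE bytes)
 *   struct rte_flow_action_raw_encap raw = {
 *           .data = outer_hdr,
 *           .size = sizeof(outer_hdr),
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &raw },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 * With attr->egress set this registers an L2_TO_L3_TUNNEL reformat; the
 * same path on ingress selects L3_TUNNEL_TO_L2 instead.
 */
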
1334 /**
1335  * Validate the modify-header actions.
1336  *
1337  * @param[in] action_flags
1338  *   Holds the actions detected until now.
1339  * @param[in] action
1340  *   Pointer to the modify action.
1341  * @param[out] error
1342  *   Pointer to error structure.
1343  *
1344  * @return
1345  *   0 on success, a negative errno value otherwise and rte_errno is set.
1346  */
1347 static int
1348 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1349                                    const struct rte_flow_action *action,
1350                                    struct rte_flow_error *error)
1351 {
1352         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1353                 return rte_flow_error_set(error, EINVAL,
1354                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1355                                           NULL, "action configuration not set");
1356         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1357                 return rte_flow_error_set(error, EINVAL,
1358                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1359                                           "can't have encap action before"
1360                                           " modify action");
1361         return 0;
1362 }
1363
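/*
 * Ordering example (illustrative): the check above rejects an action
 * list such as
 *   { VXLAN_ENCAP, SET_IPV4_SRC, QUEUE }
 * while
 *   { SET_IPV4_SRC, VXLAN_ENCAP, QUEUE }
 * is accepted: modify-header actions must precede any encap action in
 * the list.
 */
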
1364 /**
1365  * Validate the modify-header MAC address actions.
1366  *
1367  * @param[in] action_flags
1368  *   Holds the actions detected until now.
1369  * @param[in] action
1370  *   Pointer to the modify action.
1371  * @param[in] item_flags
1372  *   Holds the items detected.
1373  * @param[out] error
1374  *   Pointer to error structure.
1375  *
1376  * @return
1377  *   0 on success, a negative errno value otherwise and rte_errno is set.
1378  */
1379 static int
1380 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1381                                    const struct rte_flow_action *action,
1382                                    const uint64_t item_flags,
1383                                    struct rte_flow_error *error)
1384 {
1385         int ret = 0;
1386
1387         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1388         if (!ret) {
1389                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1390                         return rte_flow_error_set(error, EINVAL,
1391                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1392                                                   NULL,
1393                                                   "no L2 item in pattern");
1394         }
1395         return ret;
1396 }
1397
1398 /**
1399  * Validate the modify-header IPv4 address actions.
1400  *
1401  * @param[in] action_flags
1402  *   Holds the actions detected until now.
1403  * @param[in] action
1404  *   Pointer to the modify action.
1405  * @param[in] item_flags
1406  *   Holds the items detected.
1407  * @param[out] error
1408  *   Pointer to error structure.
1409  *
1410  * @return
1411  *   0 on success, a negative errno value otherwise and rte_errno is set.
1412  */
1413 static int
1414 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1415                                     const struct rte_flow_action *action,
1416                                     const uint64_t item_flags,
1417                                     struct rte_flow_error *error)
1418 {
1419         int ret = 0;
1420
1421         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1422         if (!ret) {
1423                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1424                         return rte_flow_error_set(error, EINVAL,
1425                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1426                                                   NULL,
1427                                                   "no ipv4 item in pattern");
1428         }
1429         return ret;
1430 }
1431
1432 /**
1433  * Validate the modify-header IPv6 address actions.
1434  *
1435  * @param[in] action_flags
1436  *   Holds the actions detected until now.
1437  * @param[in] action
1438  *   Pointer to the modify action.
1439  * @param[in] item_flags
1440  *   Holds the items detected.
1441  * @param[out] error
1442  *   Pointer to error structure.
1443  *
1444  * @return
1445  *   0 on success, a negative errno value otherwise and rte_errno is set.
1446  */
1447 static int
1448 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1449                                     const struct rte_flow_action *action,
1450                                     const uint64_t item_flags,
1451                                     struct rte_flow_error *error)
1452 {
1453         int ret = 0;
1454
1455         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1456         if (!ret) {
1457                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1458                         return rte_flow_error_set(error, EINVAL,
1459                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1460                                                   NULL,
1461                                                   "no ipv6 item in pattern");
1462         }
1463         return ret;
1464 }
1465
1466 /**
1467  * Validate the modify-header TP actions.
1468  *
1469  * @param[in] action_flags
1470  *   Holds the actions detected until now.
1471  * @param[in] action
1472  *   Pointer to the modify action.
1473  * @param[in] item_flags
1474  *   Holds the items detected.
1475  * @param[out] error
1476  *   Pointer to error structure.
1477  *
1478  * @return
1479  *   0 on success, a negative errno value otherwise and rte_errno is set.
1480  */
1481 static int
1482 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1483                                   const struct rte_flow_action *action,
1484                                   const uint64_t item_flags,
1485                                   struct rte_flow_error *error)
1486 {
1487         int ret = 0;
1488
1489         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1490         if (!ret) {
1491                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1492                         return rte_flow_error_set(error, EINVAL,
1493                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1494                                                   NULL, "no transport layer "
1495                                                   "in pattern");
1496         }
1497         return ret;
1498 }
1499
1500 /**
1501  * Validate the modify-header TTL actions.
1502  *
1503  * @param[in] action_flags
1504  *   Holds the actions detected until now.
1505  * @param[in] action
1506  *   Pointer to the modify action.
1507  * @param[in] item_flags
1508  *   Holds the items detected.
1509  * @param[out] error
1510  *   Pointer to error structure.
1511  *
1512  * @return
1513  *   0 on success, a negative errno value otherwise and rte_errno is set.
1514  */
1515 static int
1516 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1517                                    const struct rte_flow_action *action,
1518                                    const uint64_t item_flags,
1519                                    struct rte_flow_error *error)
1520 {
1521         int ret = 0;
1522
1523         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1524         if (!ret) {
1525                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1526                         return rte_flow_error_set(error, EINVAL,
1527                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1528                                                   NULL,
1529                                                   "no IP protocol in pattern");
1530         }
1531         return ret;
1532 }
1533
1534 /**
1535  * Validate jump action.
1536  *
1537  * @param[in] action
1538  *   Pointer to the jump action.
1539  * @param[in] group
1540  *   The group of the current flow.
1541  * @param[out] error
1542  *   Pointer to error structure.
1543  *
1544  * @return
1545  *   0 on success, a negative errno value otherwise and rte_errno is set.
1546  */
1547 static int
1548 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1549                              uint32_t group,
1550                              struct rte_flow_error *error)
1551 {
1552         if (!action->conf)
1553                 return rte_flow_error_set(error, EINVAL,
1554                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1555                                           NULL, "action configuration not set");
1556         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1557                 return rte_flow_error_set(error, EINVAL,
1558                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1559                                           "target group must be higher than"
1560                                           " the current flow group");
1561         return 0;
1562 }
1563
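/*
 * Usage sketch (illustrative): a jump from group 0 to group 1; the
 * target group must be strictly greater than the current flow group.
 *
 *   struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *   struct rte_flow_action_jump jump = { .group = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
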
1564
1565 /**
1566  * Find existing modify-header resource or create and register a new one.
1567  *
1568  * @param[in, out] dev
1569  *   Pointer to rte_eth_dev structure.
1570  * @param[in, out] resource
1571  *   Pointer to modify-header resource.
1572  * @param[in, out] dev_flow
1573  *   Pointer to the dev_flow.
1574  * @param[out] error
1575  *   Pointer to error structure.
1576  *
1577  * @return
1578  *   0 on success, a negative errno value otherwise and rte_errno is set.
1579  */
1580 static int
1581 flow_dv_modify_hdr_resource_register
1582                         (struct rte_eth_dev *dev,
1583                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1584                          struct mlx5_flow *dev_flow,
1585                          struct rte_flow_error *error)
1586 {
1587         struct mlx5_priv *priv = dev->data->dev_private;
1588         struct mlx5_ibv_shared *sh = priv->sh;
1589         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
1590
1591         struct mlx5dv_dr_ns *ns =
1592                 resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX  ?
1593                 sh->tx_ns : sh->rx_ns;
1594
1595         /* Lookup a matching resource from cache. */
1596         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
1597                 if (resource->ft_type == cache_resource->ft_type &&
1598                     resource->actions_num == cache_resource->actions_num &&
1599                     !memcmp((const void *)resource->actions,
1600                             (const void *)cache_resource->actions,
1601                             (resource->actions_num *
1602                                             sizeof(resource->actions[0])))) {
1603                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1604                                 (void *)cache_resource,
1605                                 rte_atomic32_read(&cache_resource->refcnt));
1606                         rte_atomic32_inc(&cache_resource->refcnt);
1607                         dev_flow->dv.modify_hdr = cache_resource;
1608                         return 0;
1609                 }
1610         }
1611         /* Register new modify-header resource. */
1612         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1613         if (!cache_resource)
1614                 return rte_flow_error_set(error, ENOMEM,
1615                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1616                                           "cannot allocate resource memory");
1617         *cache_resource = *resource;
1618         cache_resource->verbs_action =
1619                 mlx5_glue->dv_create_flow_action_modify_header
1620                                         (sh->ctx, cache_resource->ft_type,
1621                                          ns, 0,
1622                                          cache_resource->actions_num *
1623                                          sizeof(cache_resource->actions[0]),
1624                                          (uint64_t *)cache_resource->actions);
1625         if (!cache_resource->verbs_action) {
1626                 rte_free(cache_resource);
1627                 return rte_flow_error_set(error, ENOMEM,
1628                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1629                                           NULL, "cannot create action");
1630         }
1631         rte_atomic32_init(&cache_resource->refcnt);
1632         rte_atomic32_inc(&cache_resource->refcnt);
1633         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
1634         dev_flow->dv.modify_hdr = cache_resource;
1635         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1636                 (void *)cache_resource,
1637                 rte_atomic32_read(&cache_resource->refcnt));
1638         return 0;
1639 }
1640
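/*
 * Caching example (illustrative): a SET_IPV4_SRC action compiles into a
 * fixed modification command array; two flows carrying identical arrays
 * share one cached resource above, the second lookup only incrementing
 * refcnt instead of calling the glue layer again.
 *
 *   struct rte_flow_action_set_ipv4 set = {
 *           .ipv4_addr = RTE_BE32(0x0a000001), (10.0.0.1)
 *   };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *           .conf = &set,
 *   };
 */
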
1641 /**
1642  * Get or create a flow counter.
1643  *
1644  * @param[in] dev
1645  *   Pointer to the Ethernet device structure.
1646  * @param[in] shared
1647  *   Indicate if this counter is shared with other flows.
1648  * @param[in] id
1649  *   Counter identifier.
1650  *
1651  * @return
1652  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
1653  */
1654 static struct mlx5_flow_counter *
1655 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1656 {
1657         struct mlx5_priv *priv = dev->data->dev_private;
1658         struct mlx5_flow_counter *cnt = NULL;
1659         struct mlx5_devx_counter_set *dcs = NULL;
1660         int ret;
1661
1662         if (!priv->config.devx) {
1663                 ret = -ENOTSUP;
1664                 goto error_exit;
1665         }
1666         if (shared) {
1667                 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1668                         if (cnt->shared && cnt->id == id) {
1669                                 cnt->ref_cnt++;
1670                                 return cnt;
1671                         }
1672                 }
1673         }
1674         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1675         dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1676         if (!dcs || !cnt) {
1677                 ret = -ENOMEM;
1678                 goto error_exit;
1679         }
1680         ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1681         if (ret)
1682                 goto error_exit;
1683         struct mlx5_flow_counter tmpl = {
1684                 .shared = shared,
1685                 .ref_cnt = 1,
1686                 .id = id,
1687                 .dcs = dcs,
1688         };
1689         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1690         if (!tmpl.action) {
1691                 ret = -errno; /* Keep ret negative; error_exit negates. */
1692                 goto error_exit;
1693         }
1694         *cnt = tmpl;
1695         LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1696         return cnt;
1697 error_exit:
1698         rte_free(cnt);
1699         rte_free(dcs);
1700         rte_errno = -ret;
1701         return NULL;
1702 }
1703
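/*
 * Usage sketch (illustrative): two flows referencing the same shared
 * counter id aggregate their statistics; the second call to
 * flow_dv_counter_new() above finds the cached entry and only bumps
 * its ref_cnt.
 *
 *   struct rte_flow_action_count count = { .shared = 1, .id = 42 };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *           .conf = &count,
 *   };
 */
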
1704 /**
1705  * Release a flow counter.
1706  *
1707  * @param[in] counter
1708  *   Pointer to the counter handler.
1709  */
1710 static void
1711 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1712 {
1713         int ret;
1714
1715         if (!counter)
1716                 return;
1717         if (--counter->ref_cnt == 0) {
1718                 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1719                 if (ret)
1720                         DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1721                 LIST_REMOVE(counter, next);
1722                 rte_free(counter->dcs);
1723                 rte_free(counter);
1724         }
1725 }
1726
1727 /**
1728  * Verify the @p attributes will be correctly understood by the NIC and store
1729  * them in the @p flow if everything is correct.
1730  *
1731  * @param[in] dev
1732  *   Pointer to the rte_eth_dev structure.
1733  * @param[in] attributes
1734  *   Pointer to the flow attributes.
1735  * @param[out] error
1736  *   Pointer to error structure.
1737  *
1738  * @return
1739  *   0 on success, a negative errno value otherwise and rte_errno is set.
1740  */
1741 static int
1742 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1743                             const struct rte_flow_attr *attributes,
1744                             struct rte_flow_error *error)
1745 {
1746         struct mlx5_priv *priv = dev->data->dev_private;
1747         uint32_t priority_max = priv->config.flow_prio - 1;
1748
1749 #ifndef HAVE_MLX5DV_DR
1750         if (attributes->group)
1751                 return rte_flow_error_set(error, ENOTSUP,
1752                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1753                                           NULL,
1754                                           "groups are not supported");
1755 #endif
1756         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1757             attributes->priority >= priority_max)
1758                 return rte_flow_error_set(error, ENOTSUP,
1759                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1760                                           NULL,
1761                                           "priority out of range");
1762         if (attributes->transfer)
1763                 return rte_flow_error_set(error, ENOTSUP,
1764                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1765                                           NULL,
1766                                           "transfer is not supported");
1767         if (!(attributes->egress ^ attributes->ingress))
1768                 return rte_flow_error_set(error, ENOTSUP,
1769                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1770                                           "must specify exactly one of "
1771                                           "ingress or egress");
1772         return 0;
1773 }
1774
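/*
 * Usage sketch (illustrative): attributes that pass the checks above --
 * exactly one direction bit, no transfer, and a priority below the
 * configured maximum (or MLX5_FLOW_PRIO_RSVD); a non-zero group
 * additionally requires HAVE_MLX5DV_DR.
 *
 *   struct rte_flow_attr attr = {
 *           .group = 1,
 *           .priority = 0,
 *           .ingress = 1,
 *   };
 */
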
1775 /**
1776  * Internal validation function. For validating both actions and items.
1777  *
1778  * @param[in] dev
1779  *   Pointer to the rte_eth_dev structure.
1780  * @param[in] attr
1781  *   Pointer to the flow attributes.
1782  * @param[in] items
1783  *   Pointer to the list of items.
1784  * @param[in] actions
1785  *   Pointer to the list of actions.
1786  * @param[out] error
1787  *   Pointer to the error structure.
1788  *
1789  * @return
1790  *   0 on success, a negative errno value otherwise and rte_errno is set.
1791  */
1792 static int
1793 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1794                  const struct rte_flow_item items[],
1795                  const struct rte_flow_action actions[],
1796                  struct rte_flow_error *error)
1797 {
1798         int ret;
1799         uint64_t action_flags = 0;
1800         uint64_t item_flags = 0;
1801         uint64_t last_item = 0;
1802         int tunnel = 0;
1803         uint8_t next_protocol = 0xff;
1804         int actions_n = 0;
1805
1806         if (items == NULL)
1807                 return rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                           NULL, "NULL pattern");
1808         ret = flow_dv_validate_attributes(dev, attr, error);
1809         if (ret < 0)
1810                 return ret;
1811         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1812                 tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1813                 switch (items->type) {
1814                 case RTE_FLOW_ITEM_TYPE_VOID:
1815                         break;
1816                 case RTE_FLOW_ITEM_TYPE_ETH:
1817                         ret = mlx5_flow_validate_item_eth(items, item_flags,
1818                                                           error);
1819                         if (ret < 0)
1820                                 return ret;
1821                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1822                                              MLX5_FLOW_LAYER_OUTER_L2;
1823                         break;
1824                 case RTE_FLOW_ITEM_TYPE_VLAN:
1825                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
1826                                                            error);
1827                         if (ret < 0)
1828                                 return ret;
1829                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1830                                              MLX5_FLOW_LAYER_OUTER_VLAN;
1831                         break;
1832                 case RTE_FLOW_ITEM_TYPE_IPV4:
1833                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1834                                                            NULL, error);
1835                         if (ret < 0)
1836                                 return ret;
1837                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1838                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1839                         if (items->mask != NULL &&
1840                             ((const struct rte_flow_item_ipv4 *)
1841                              items->mask)->hdr.next_proto_id) {
1842                                 next_protocol =
1843                                         ((const struct rte_flow_item_ipv4 *)
1844                                          (items->spec))->hdr.next_proto_id;
1845                                 next_protocol &=
1846                                         ((const struct rte_flow_item_ipv4 *)
1847                                          (items->mask))->hdr.next_proto_id;
1848                         } else {
1849                                 /* Reset for inner layer. */
1850                                 next_protocol = 0xff;
1851                         }
1852                         break;
1853                 case RTE_FLOW_ITEM_TYPE_IPV6:
1854                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1855                                                            NULL, error);
1856                         if (ret < 0)
1857                                 return ret;
1858                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1859                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1860                         if (items->mask != NULL &&
1861                             ((const struct rte_flow_item_ipv6 *)
1862                              items->mask)->hdr.proto) {
1863                                 next_protocol =
1864                                         ((const struct rte_flow_item_ipv6 *)
1865                                          items->spec)->hdr.proto;
1866                                 next_protocol &=
1867                                         ((const struct rte_flow_item_ipv6 *)
1868                                          items->mask)->hdr.proto;
1869                         } else {
1870                                 /* Reset for inner layer. */
1871                                 next_protocol = 0xff;
1872                         }
1873                         break;
1874                 case RTE_FLOW_ITEM_TYPE_TCP:
1875                         ret = mlx5_flow_validate_item_tcp
1876                                                 (items, item_flags,
1877                                                  next_protocol,
1878                                                  &rte_flow_item_tcp_mask,
1879                                                  error);
1880                         if (ret < 0)
1881                                 return ret;
1882                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1883                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
1884                         break;
1885                 case RTE_FLOW_ITEM_TYPE_UDP:
1886                         ret = mlx5_flow_validate_item_udp(items, item_flags,
1887                                                           next_protocol,
1888                                                           error);
1889                         if (ret < 0)
1890                                 return ret;
1891                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1892                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
1893                         break;
1894                 case RTE_FLOW_ITEM_TYPE_GRE:
1895                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1896                         ret = mlx5_flow_validate_item_gre(items, item_flags,
1897                                                           next_protocol, error);
1898                         if (ret < 0)
1899                                 return ret;
1900                         last_item = MLX5_FLOW_LAYER_GRE;
1901                         break;
1902                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1903                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1904                                                             error);
1905                         if (ret < 0)
1906                                 return ret;
1907                         last_item = MLX5_FLOW_LAYER_VXLAN;
1908                         break;
1909                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1910                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
1911                                                                 item_flags, dev,
1912                                                                 error);
1913                         if (ret < 0)
1914                                 return ret;
1915                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1916                         break;
1917                 case RTE_FLOW_ITEM_TYPE_MPLS:
1918                         ret = mlx5_flow_validate_item_mpls(dev, items,
1919                                                            item_flags,
1920                                                            last_item, error);
1921                         if (ret < 0)
1922                                 return ret;
1923                         last_item = MLX5_FLOW_LAYER_MPLS;
1924                         break;
1925                 case RTE_FLOW_ITEM_TYPE_META:
1926                         ret = flow_dv_validate_item_meta(dev, items, attr,
1927                                                          error);
1928                         if (ret < 0)
1929                                 return ret;
1930                         last_item = MLX5_FLOW_ITEM_METADATA;
1931                         break;
1932                 default:
1933                         return rte_flow_error_set(error, ENOTSUP,
1934                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1935                                                   NULL, "item not supported");
1936                 }
1937                 item_flags |= last_item;
1938         }
1939         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1940                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
1941                         return rte_flow_error_set(error, ENOTSUP,
1942                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1943                                                   actions, "too many actions");
1944                 switch (actions->type) {
1945                 case RTE_FLOW_ACTION_TYPE_VOID:
1946                         break;
1947                 case RTE_FLOW_ACTION_TYPE_FLAG:
1948                         ret = mlx5_flow_validate_action_flag(action_flags,
1949                                                              attr, error);
1950                         if (ret < 0)
1951                                 return ret;
1952                         action_flags |= MLX5_FLOW_ACTION_FLAG;
1953                         ++actions_n;
1954                         break;
1955                 case RTE_FLOW_ACTION_TYPE_MARK:
1956                         ret = mlx5_flow_validate_action_mark(actions,
1957                                                              action_flags,
1958                                                              attr, error);
1959                         if (ret < 0)
1960                                 return ret;
1961                         action_flags |= MLX5_FLOW_ACTION_MARK;
1962                         ++actions_n;
1963                         break;
1964                 case RTE_FLOW_ACTION_TYPE_DROP:
1965                         ret = mlx5_flow_validate_action_drop(action_flags,
1966                                                              attr, error);
1967                         if (ret < 0)
1968                                 return ret;
1969                         action_flags |= MLX5_FLOW_ACTION_DROP;
1970                         ++actions_n;
1971                         break;
1972                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1973                         ret = mlx5_flow_validate_action_queue(actions,
1974                                                               action_flags, dev,
1975                                                               attr, error);
1976                         if (ret < 0)
1977                                 return ret;
1978                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
1979                         ++actions_n;
1980                         break;
1981                 case RTE_FLOW_ACTION_TYPE_RSS:
1982                         ret = mlx5_flow_validate_action_rss(actions,
1983                                                             action_flags, dev,
1984                                                             attr, error);
1985                         if (ret < 0)
1986                                 return ret;
1987                         action_flags |= MLX5_FLOW_ACTION_RSS;
1988                         ++actions_n;
1989                         break;
1990                 case RTE_FLOW_ACTION_TYPE_COUNT:
1991                         ret = flow_dv_validate_action_count(dev, error);
1992                         if (ret < 0)
1993                                 return ret;
1994                         action_flags |= MLX5_FLOW_ACTION_COUNT;
1995                         ++actions_n;
1996                         break;
1997                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1998                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
1999                         ret = flow_dv_validate_action_l2_encap(action_flags,
2000                                                                actions, attr,
2001                                                                error);
2002                         if (ret < 0)
2003                                 return ret;
2004                         action_flags |= actions->type ==
2005                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2006                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
2007                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
2008                         ++actions_n;
2009                         break;
2010                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2011                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2012                         ret = flow_dv_validate_action_l2_decap(action_flags,
2013                                                                attr, error);
2014                         if (ret < 0)
2015                                 return ret;
2016                         action_flags |= actions->type ==
2017                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2018                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
2019                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
2020                         ++actions_n;
2021                         break;
2022                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2023                         ret = flow_dv_validate_action_raw_encap(action_flags,
2024                                                                 actions, attr,
2025                                                                 error);
2026                         if (ret < 0)
2027                                 return ret;
2028                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2029                         ++actions_n;
2030                         break;
2031                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2032                         ret = flow_dv_validate_action_raw_decap(action_flags,
2033                                                                 actions, attr,
2034                                                                 error);
2035                         if (ret < 0)
2036                                 return ret;
2037                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2038                         ++actions_n;
2039                         break;
2040                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2041                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2042                         ret = flow_dv_validate_action_modify_mac(action_flags,
2043                                                                  actions,
2044                                                                  item_flags,
2045                                                                  error);
2046                         if (ret < 0)
2047                                 return ret;
2048                         /* Count all modify-header actions as one action. */
2049                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2050                                 ++actions_n;
2051                         action_flags |= actions->type ==
2052                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2053                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
2054                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
2055                         break;
2056
2057                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2058                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2059                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
2060                                                                   actions,
2061                                                                   item_flags,
2062                                                                   error);
2063                         if (ret < 0)
2064                                 return ret;
2065                         /* Count all modify-header actions as one action. */
2066                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2067                                 ++actions_n;
2068                         action_flags |= actions->type ==
2069                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2070                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2071                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
2072                         break;
2073                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2074                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2075                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
2076                                                                   actions,
2077                                                                   item_flags,
2078                                                                   error);
2079                         if (ret < 0)
2080                                 return ret;
2081                         /* Count all modify-header actions as one action. */
2082                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2083                                 ++actions_n;
2084                         action_flags |= actions->type ==
2085                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2086                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2087                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
2088                         break;
2089                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2090                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2091                         ret = flow_dv_validate_action_modify_tp(action_flags,
2092                                                                 actions,
2093                                                                 item_flags,
2094                                                                 error);
2095                         if (ret < 0)
2096                                 return ret;
2097                         /* Count all modify-header actions as one action. */
2098                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2099                                 ++actions_n;
2100                         action_flags |= actions->type ==
2101                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2102                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
2103                                                 MLX5_FLOW_ACTION_SET_TP_DST;
2104                         break;
2105                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2106                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2107                         ret = flow_dv_validate_action_modify_ttl(action_flags,
2108                                                                  actions,
2109                                                                  item_flags,
2110                                                                  error);
2111                         if (ret < 0)
2112                                 return ret;
2113                         /* Count all modify-header actions as one action. */
2114                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2115                                 ++actions_n;
2116                         action_flags |= actions->type ==
2117                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
2118                                                 MLX5_FLOW_ACTION_SET_TTL :
2119                                                 MLX5_FLOW_ACTION_DEC_TTL;
2120                         break;
2121                 case RTE_FLOW_ACTION_TYPE_JUMP:
2122                         ret = flow_dv_validate_action_jump(actions,
2123                                                            attr->group, error);
2124                         if (ret)
2125                                 return ret;
2126                         ++actions_n;
2127                         action_flags |= MLX5_FLOW_ACTION_JUMP;
2128                         break;
2129                 default:
2130                         return rte_flow_error_set(error, ENOTSUP,
2131                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2132                                                   actions,
2133                                                   "action not supported");
2134                 }
2135         }
2136         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2137                 return rte_flow_error_set(error, EINVAL,
2138                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
2139                                           "no fate action is found");
2140         return 0;
2141 }
2142
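/*
 * Usage sketch (illustrative): a minimal pattern/action list as it
 * reaches this validator via rte_flow_validate(); "port_id" is a
 * placeholder.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 */
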
2143 /**
2144  * Internal preparation function. Allocates the DV flow structure;
2145  * its size is constant.
2146  *
2147  * @param[in] attr
2148  *   Pointer to the flow attributes.
2149  * @param[in] items
2150  *   Pointer to the list of items.
2151  * @param[in] actions
2152  *   Pointer to the list of actions.
2153  * @param[out] error
2154  *   Pointer to the error structure.
2155  *
2156  * @return
2157  *   Pointer to mlx5_flow object on success,
2158  *   otherwise NULL and rte_errno is set.
2159  */
2160 static struct mlx5_flow *
2161 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2162                 const struct rte_flow_item items[] __rte_unused,
2163                 const struct rte_flow_action actions[] __rte_unused,
2164                 struct rte_flow_error *error)
2165 {
2166         uint32_t size = sizeof(struct mlx5_flow);
2167         struct mlx5_flow *flow;
2168
2169         flow = rte_calloc(__func__, 1, size, 0);
2170         if (!flow) {
2171                 rte_flow_error_set(error, ENOMEM,
2172                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2173                                    "not enough memory to create flow");
2174                 return NULL;
2175         }
2176         flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2177         return flow;
2178 }
2179
2180 #ifndef NDEBUG
2181 /**
2182  * Sanity check for match mask and value, similar to check_valid_spec() in
2183  * the kernel driver. If a value bit is set outside the mask, it fails.
2184  *
2185  * @param match_mask
2186  *   Pointer to match mask buffer.
2187  * @param match_value
2188  *   Pointer to match value buffer.
2189  *
2190  * @return
2191  *   0 if valid, -EINVAL otherwise.
2192  */
2193 static int
2194 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2195 {
2196         uint8_t *m = match_mask;
2197         uint8_t *v = match_value;
2198         unsigned int i;
2199
2200         for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2201                 if (v[i] & ~m[i]) {
2202                         DRV_LOG(ERR,
2203                                 "match_value differs from match_criteria"
2204                                 " %p[%u] != %p[%u]",
2205                                 match_value, i, match_mask, i);
2206                         return -EINVAL;
2207                 }
2208         }
2209         return 0;
2210 }
2211 #endif
2212
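/*
 * Example of the rule enforced above (illustrative): for a mask byte
 * 0xf0, a value byte 0x5a fails since bits 0x0a lie outside the mask
 * (0x5a & ~0xf0 = 0x0a), while 0x50 passes; every set value bit must
 * also be set in the mask.
 */
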
2213 /**
2214  * Add Ethernet item to matcher and to the value.
2215  *
2216  * @param[in, out] matcher
2217  *   Flow matcher.
2218  * @param[in, out] key
2219  *   Flow matcher value.
2220  * @param[in] item
2221  *   Flow pattern to translate.
2222  * @param[in] inner
2223  *   Item is inner pattern.
2224  */
2225 static void
2226 flow_dv_translate_item_eth(void *matcher, void *key,
2227                            const struct rte_flow_item *item, int inner)
2228 {
2229         const struct rte_flow_item_eth *eth_m = item->mask;
2230         const struct rte_flow_item_eth *eth_v = item->spec;
2231         const struct rte_flow_item_eth nic_mask = {
2232                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2233                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2234                 .type = RTE_BE16(0xffff),
2235         };
2236         void *headers_m;
2237         void *headers_v;
2238         char *l24_v;
2239         unsigned int i;
2240
2241         if (!eth_v)
2242                 return;
2243         if (!eth_m)
2244                 eth_m = &nic_mask;
2245         if (inner) {
2246                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2247                                          inner_headers);
2248                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2249         } else {
2250                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2251                                          outer_headers);
2252                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2253         }
2254         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2255                &eth_m->dst, sizeof(eth_m->dst));
2256         /* The value must be in the range of the mask. */
2257         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2258         for (i = 0; i < sizeof(eth_m->dst); ++i)
2259                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2260         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2261                &eth_m->src, sizeof(eth_m->src));
2262         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2263         /* The value must be in the range of the mask. */
2264         for (i = 0; i < sizeof(eth_m->src); ++i)
2265                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2266         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2267                  rte_be_to_cpu_16(eth_m->type));
2268         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2269         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2270 }
2271
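/*
 * Usage sketch (illustrative): an Ethernet item as consumed above; the
 * translation ANDs spec with mask, so only masked bits reach the value
 * buffer. Addresses are placeholders.
 *
 *   struct rte_flow_item_eth spec = {
 *           .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *           .type = RTE_BE16(0x0800),
 *   };
 *   struct rte_flow_item_eth mask = {
 *           .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *           .type = RTE_BE16(0xffff),
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ETH,
 *           .spec = &spec,
 *           .mask = &mask,
 *   };
 */
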
2272 /**
2273  * Add VLAN item to matcher and to the value.
2274  *
2275  * @param[in, out] matcher
2276  *   Flow matcher.
2277  * @param[in, out] key
2278  *   Flow matcher value.
2279  * @param[in] item
2280  *   Flow pattern to translate.
2281  * @param[in] inner
2282  *   Item is inner pattern.
2283  */
2284 static void
2285 flow_dv_translate_item_vlan(void *matcher, void *key,
2286                             const struct rte_flow_item *item,
2287                             int inner)
2288 {
2289         const struct rte_flow_item_vlan *vlan_m = item->mask;
2290         const struct rte_flow_item_vlan *vlan_v = item->spec;
2291         const struct rte_flow_item_vlan nic_mask = {
2292                 .tci = RTE_BE16(0x0fff),
2293                 .inner_type = RTE_BE16(0xffff),
2294         };
2295         void *headers_m;
2296         void *headers_v;
2297         uint16_t tci_m;
2298         uint16_t tci_v;
2299
2300         if (!vlan_v)
2301                 return;
2302         if (!vlan_m)
2303                 vlan_m = &nic_mask;
2304         if (inner) {
2305                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2306                                          inner_headers);
2307                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2308         } else {
2309                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2310                                          outer_headers);
2311                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2312         }
2313         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2314         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2315         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2316         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2317         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2318         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2319         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2320         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2321         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2322         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2323 }
2324
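/*
 * Worked example (illustrative): TCI 0xa07b splits as written above --
 * first_vid = 0xa07b & 0xfff = 0x07b, first_cfi = (0xa07b >> 12) & 1 = 0
 * and first_prio = 0xa07b >> 13 = 5; MLX5_SET() truncates each shifted
 * value to its field width, so passing the higher bits along is safe.
 */
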
2325 /**
2326  * Add IPV4 item to matcher and to the value.
2327  *
2328  * @param[in, out] matcher
2329  *   Flow matcher.
2330  * @param[in, out] key
2331  *   Flow matcher value.
2332  * @param[in] item
2333  *   Flow pattern to translate.
2334  * @param[in] inner
2335  *   Item is inner pattern.
2336  * @param[in] group
2337  *   The group to insert the rule.
2338  */
2339 static void
2340 flow_dv_translate_item_ipv4(void *matcher, void *key,
2341                             const struct rte_flow_item *item,
2342                             int inner, uint32_t group)
2343 {
2344         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2345         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2346         const struct rte_flow_item_ipv4 nic_mask = {
2347                 .hdr = {
2348                         .src_addr = RTE_BE32(0xffffffff),
2349                         .dst_addr = RTE_BE32(0xffffffff),
2350                         .type_of_service = 0xff,
2351                         .next_proto_id = 0xff,
2352                 },
2353         };
2354         void *headers_m;
2355         void *headers_v;
2356         char *l24_m;
2357         char *l24_v;
2358         uint8_t tos;
2359
2360         if (inner) {
2361                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2362                                          inner_headers);
2363                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2364         } else {
2365                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2366                                          outer_headers);
2367                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2368         }
2369         if (group == 0)
2370                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2371         else
2372                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2373         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2374         if (!ipv4_v)
2375                 return;
2376         if (!ipv4_m)
2377                 ipv4_m = &nic_mask;
2378         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2379                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2380         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2381                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2382         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2383         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2384         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2385                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
2386         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2387                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
2388         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2389         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2390         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2391         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2392                  ipv4_m->hdr.type_of_service);
2393         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2394         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2395                  ipv4_m->hdr.type_of_service >> 2);
2396         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2397         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2398                  ipv4_m->hdr.next_proto_id);
2399         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2400                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2401 }
2402
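/*
 * Worked example (illustrative): spec src_addr 10.0.0.5 with mask
 * 255.255.255.0 stores 10.0.0.0 in the value buffer (spec & mask) and
 * the /24 mask itself in the matcher, so any 10.0.0.x source matches.
 */
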
2403 /**
2404  * Add IPV6 item to matcher and to the value.
2405  *
2406  * @param[in, out] matcher
2407  *   Flow matcher.
2408  * @param[in, out] key
2409  *   Flow matcher value.
2410  * @param[in] item
2411  *   Flow pattern to translate.
2412  * @param[in] inner
2413  *   Item is inner pattern.
2414  * @param[in] group
2415  *   The group to insert the rule.
2416  */
2417 static void
2418 flow_dv_translate_item_ipv6(void *matcher, void *key,
2419                             const struct rte_flow_item *item,
2420                             int inner, uint32_t group)
2421 {
2422         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2423         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2424         const struct rte_flow_item_ipv6 nic_mask = {
2425                 .hdr = {
2426                         .src_addr =
2427                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2428                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2429                         .dst_addr =
2430                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2431                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2432                         .vtc_flow = RTE_BE32(0xffffffff),
2433                         .proto = 0xff,
2434                         .hop_limits = 0xff,
2435                 },
2436         };
2437         void *headers_m;
2438         void *headers_v;
2439         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2440         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2441         char *l24_m;
2442         char *l24_v;
2443         uint32_t vtc_m;
2444         uint32_t vtc_v;
2445         int i;
2446         int size;
2447
2448         if (inner) {
2449                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2450                                          inner_headers);
2451                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2452         } else {
2453                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2454                                          outer_headers);
2455                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2456         }
2457         if (group == 0)
2458                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2459         else
2460                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2461         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2462         if (!ipv6_v)
2463                 return;
2464         if (!ipv6_m)
2465                 ipv6_m = &nic_mask;
2466         size = sizeof(ipv6_m->hdr.dst_addr);
2467         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2468                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2469         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2470                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2471         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2472         for (i = 0; i < size; ++i)
2473                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2474         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2475                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
2476         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2477                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
2478         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2479         for (i = 0; i < size; ++i)
2480                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
2481         /* TOS. */
2482         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2483         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2484         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2485         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2486         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2487         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2488         /* Label. */
2489         if (inner) {
2490                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2491                          vtc_m);
2492                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2493                          vtc_v);
2494         } else {
2495                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2496                          vtc_m);
2497                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2498                          vtc_v);
2499         }
2500         /* Protocol. */
2501         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2502                  ipv6_m->hdr.proto);
2503         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2504                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2505 }
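
/*
 * Editor's illustration, not part of the original driver: how the
 * vtc_flow shifts above decompose the IPv6 version/TC/flow-label word.
 * Bits 31:28 hold the version, 27:22 the DSCP, 21:20 the ECN and 19:0
 * the flow label; MLX5_SET() truncates the value to the destination
 * field width, which is why "vtc >> 20" is enough for the 2-bit ip_ecn
 * field. The helper name below is hypothetical.
 */
static __rte_unused void
flow_dv_example_ipv6_vtc_split(uint32_t vtc, uint32_t *dscp,
                               uint32_t *ecn, uint32_t *label)
{
        *dscp = (vtc >> 22) & 0x3f; /* 6-bit DSCP. */
        *ecn = (vtc >> 20) & 0x3; /* 2-bit ECN. */
        *label = vtc & 0xfffff; /* 20-bit flow label. */
}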
2506
2507 /**
2508  * Add TCP item to matcher and to the value.
2509  *
2510  * @param[in, out] matcher
2511  *   Flow matcher.
2512  * @param[in, out] key
2513  *   Flow matcher value.
2514  * @param[in] item
2515  *   Flow pattern to translate.
2516  * @param[in] inner
2517  *   Item is inner pattern.
2518  */
2519 static void
2520 flow_dv_translate_item_tcp(void *matcher, void *key,
2521                            const struct rte_flow_item *item,
2522                            int inner)
2523 {
2524         const struct rte_flow_item_tcp *tcp_m = item->mask;
2525         const struct rte_flow_item_tcp *tcp_v = item->spec;
2526         void *headers_m;
2527         void *headers_v;
2528
2529         if (inner) {
2530                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2531                                          inner_headers);
2532                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2533         } else {
2534                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2535                                          outer_headers);
2536                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2537         }
2538         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2539         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2540         if (!tcp_v)
2541                 return;
2542         if (!tcp_m)
2543                 tcp_m = &rte_flow_item_tcp_mask;
2544         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2545                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
2546         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2547                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2548         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2549                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2550         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2551                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2552 }
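
/*
 * Editor's illustration, not part of the original driver: the mask/value
 * convention the translators above all follow. The matcher buffer gets
 * the item mask verbatim while the key buffer gets spec & mask, so bits
 * the application did not ask to match never leak into the programmed
 * value. The helper name below is hypothetical.
 */
static __rte_unused void
flow_dv_example_mask_value(rte_be16_t spec, rte_be16_t mask,
                           uint16_t *hw_mask, uint16_t *hw_value)
{
        *hw_mask = rte_be_to_cpu_16(mask);
        *hw_value = rte_be_to_cpu_16(spec & mask);
}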
2553
2554 /**
2555  * Add UDP item to matcher and to the value.
2556  *
2557  * @param[in, out] matcher
2558  *   Flow matcher.
2559  * @param[in, out] key
2560  *   Flow matcher value.
2561  * @param[in] item
2562  *   Flow pattern to translate.
2563  * @param[in] inner
2564  *   Item is inner pattern.
2565  */
2566 static void
2567 flow_dv_translate_item_udp(void *matcher, void *key,
2568                            const struct rte_flow_item *item,
2569                            int inner)
2570 {
2571         const struct rte_flow_item_udp *udp_m = item->mask;
2572         const struct rte_flow_item_udp *udp_v = item->spec;
2573         void *headers_m;
2574         void *headers_v;
2575
2576         if (inner) {
2577                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2578                                          inner_headers);
2579                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2580         } else {
2581                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2582                                          outer_headers);
2583                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2584         }
2585         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2586         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2587         if (!udp_v)
2588                 return;
2589         if (!udp_m)
2590                 udp_m = &rte_flow_item_udp_mask;
2591         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2592                  rte_be_to_cpu_16(udp_m->hdr.src_port));
2593         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2594                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2595         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2596                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
2597         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2598                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2599 }
2600
2601 /**
2602  * Add GRE item to matcher and to the value.
2603  *
2604  * @param[in, out] matcher
2605  *   Flow matcher.
2606  * @param[in, out] key
2607  *   Flow matcher value.
2608  * @param[in] item
2609  *   Flow pattern to translate.
2610  * @param[in] inner
2611  *   Item is inner pattern.
2612  */
2613 static void
2614 flow_dv_translate_item_gre(void *matcher, void *key,
2615                            const struct rte_flow_item *item,
2616                            int inner)
2617 {
2618         const struct rte_flow_item_gre *gre_m = item->mask;
2619         const struct rte_flow_item_gre *gre_v = item->spec;
2620         void *headers_m;
2621         void *headers_v;
2622         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2623         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2624
2625         if (inner) {
2626                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2627                                          inner_headers);
2628                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2629         } else {
2630                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2631                                          outer_headers);
2632                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2633         }
2634         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2635         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2636         if (!gre_v)
2637                 return;
2638         if (!gre_m)
2639                 gre_m = &rte_flow_item_gre_mask;
2640         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2641                  rte_be_to_cpu_16(gre_m->protocol));
2642         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2643                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2644 }
2645
2646 /**
2647  * Add NVGRE item to matcher and to the value.
2648  *
2649  * @param[in, out] matcher
2650  *   Flow matcher.
2651  * @param[in, out] key
2652  *   Flow matcher value.
2653  * @param[in] item
2654  *   Flow pattern to translate.
2655  * @param[in] inner
2656  *   Item is inner pattern.
2657  */
2658 static void
2659 flow_dv_translate_item_nvgre(void *matcher, void *key,
2660                              const struct rte_flow_item *item,
2661                              int inner)
2662 {
2663         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2664         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2665         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2666         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2667         const char *tni_flow_id_m;
2668         const char *tni_flow_id_v;
2669         char *gre_key_m;
2670         char *gre_key_v;
2671         int size;
2672         int i;
2673
2674         flow_dv_translate_item_gre(matcher, key, item, inner);
2675         if (!nvgre_v)
2676                 return;
2677         if (!nvgre_m)
2678                 nvgre_m = &rte_flow_item_nvgre_mask;
             tni_flow_id_m = (const char *)nvgre_m->tni;
             tni_flow_id_v = (const char *)nvgre_v->tni;
2679         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2680         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2681         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2682         memcpy(gre_key_m, tni_flow_id_m, size);
2683         for (i = 0; i < size; ++i)
2684                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2685 }
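
/*
 * Editor's note, not part of the original driver: the copy above treats
 * the 24-bit TNI and the 8-bit flow id as one contiguous 32-bit value
 * and writes it to the high word of the GRE key. A sketch of a
 * compile-time guard for the size assumption (the function name is
 * hypothetical):
 */
static __rte_unused void
flow_dv_example_nvgre_layout_check(void)
{
        /* TNI (3 bytes) + flow_id (1 byte) must fill the 32-bit key. */
        RTE_BUILD_BUG_ON(sizeof(((struct rte_flow_item_nvgre *)0)->tni) +
                         sizeof(((struct rte_flow_item_nvgre *)0)->flow_id)
                         != 4);
}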
2686
2687 /**
2688  * Add VXLAN item to matcher and to the value.
2689  *
2690  * @param[in, out] matcher
2691  *   Flow matcher.
2692  * @param[in, out] key
2693  *   Flow matcher value.
2694  * @param[in] item
2695  *   Flow pattern to translate.
2696  * @param[in] inner
2697  *   Item is inner pattern.
2698  */
2699 static void
2700 flow_dv_translate_item_vxlan(void *matcher, void *key,
2701                              const struct rte_flow_item *item,
2702                              int inner)
2703 {
2704         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2705         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2706         void *headers_m;
2707         void *headers_v;
2708         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2709         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2710         char *vni_m;
2711         char *vni_v;
2712         uint16_t dport;
2713         int size;
2714         int i;
2715
2716         if (inner) {
2717                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2718                                          inner_headers);
2719                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2720         } else {
2721                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2722                                          outer_headers);
2723                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2724         }
2725         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2726                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
2727         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2728                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2729                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
2730         }
2731         if (!vxlan_v)
2732                 return;
2733         if (!vxlan_m)
2734                 vxlan_m = &rte_flow_item_vxlan_mask;
2735         size = sizeof(vxlan_m->vni);
2736         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2737         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2738         memcpy(vni_m, vxlan_m->vni, size);
2739         for (i = 0; i < size; ++i)
2740                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
2741 }
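
/*
 * Editor's illustration, not part of the original driver: how an
 * application-side VXLAN item consumed by the translator above could be
 * built to match one VNI exactly. The VNI bytes are network order, most
 * significant byte first. The helper name is hypothetical.
 */
static __rte_unused void
flow_dv_example_vxlan_vni_item(struct rte_flow_item *item,
                               struct rte_flow_item_vxlan *spec,
                               struct rte_flow_item_vxlan *mask,
                               uint32_t vni)
{
        memset(spec, 0, sizeof(*spec));
        memset(mask, 0, sizeof(*mask));
        spec->vni[0] = (vni >> 16) & 0xff;
        spec->vni[1] = (vni >> 8) & 0xff;
        spec->vni[2] = vni & 0xff;
        memset(mask->vni, 0xff, sizeof(mask->vni));
        item->type = RTE_FLOW_ITEM_TYPE_VXLAN;
        item->spec = spec;
        item->last = NULL;
        item->mask = mask;
}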
2742
2743 /**
2744  * Add MPLS item to matcher and to the value.
2745  *
2746  * @param[in, out] matcher
2747  *   Flow matcher.
2748  * @param[in, out] key
2749  *   Flow matcher value.
2750  * @param[in] item
2751  *   Flow pattern to translate.
2752  * @param[in] prev_layer
2753  *   The protocol layer indicated in previous item.
2754  * @param[in] inner
2755  *   Item is inner pattern.
2756  */
2757 static void
2758 flow_dv_translate_item_mpls(void *matcher, void *key,
2759                             const struct rte_flow_item *item,
2760                             uint64_t prev_layer,
2761                             int inner)
2762 {
2763         const uint32_t *in_mpls_m = item->mask;
2764         const uint32_t *in_mpls_v = item->spec;
2765         uint32_t *out_mpls_m = NULL;
2766         uint32_t *out_mpls_v = NULL;
2767         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2768         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2769         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2770                                      misc_parameters_2);
2771         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2772         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2773         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2774
2775         switch (prev_layer) {
2776         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2777                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2778                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2779                          MLX5_UDP_PORT_MPLS);
2780                 break;
2781         case MLX5_FLOW_LAYER_GRE:
2782                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
2783                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2784                          ETHER_TYPE_MPLS);
2785                 break;
2786         default:
2787                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2788                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2789                          IPPROTO_MPLS);
2790                 break;
2791         }
2792         if (!in_mpls_v)
2793                 return;
2794         if (!in_mpls_m)
2795                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
2796         switch (prev_layer) {
2797         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2798                 out_mpls_m =
2799                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2800                                                  outer_first_mpls_over_udp);
2801                 out_mpls_v =
2802                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2803                                                  outer_first_mpls_over_udp);
2804                 break;
2805         case MLX5_FLOW_LAYER_GRE:
2806                 out_mpls_m =
2807                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2808                                                  outer_first_mpls_over_gre);
2809                 out_mpls_v =
2810                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2811                                                  outer_first_mpls_over_gre);
2812                 break;
2813         default:
2814                 /* Inner MPLS not over GRE is not supported. */
2815                 if (!inner) {
2816                         out_mpls_m =
2817                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2818                                                          misc2_m,
2819                                                          outer_first_mpls);
2820                         out_mpls_v =
2821                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2822                                                          misc2_v,
2823                                                          outer_first_mpls);
2824                 }
2825                 break;
2826         }
2827         if (out_mpls_m && out_mpls_v) {
2828                 *out_mpls_m = *in_mpls_m;
2829                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
2830         }
2831 }
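
/*
 * Editor's note, not part of the original driver: a summary of the
 * prev_layer dispatch above. MPLS-over-UDP is recognized by destination
 * port 6635 (MLX5_UDP_PORT_MPLS), MPLS-over-GRE by GRE protocol 0x8847
 * (ETHER_TYPE_MPLS), and anything else falls back to IPPROTO_MPLS. The
 * helper name is hypothetical.
 */
static __rte_unused const char *
flow_dv_example_mpls_encap_name(uint64_t prev_layer)
{
        switch (prev_layer) {
        case MLX5_FLOW_LAYER_OUTER_L4_UDP:
                return "MPLS-over-UDP (dport 6635)";
        case MLX5_FLOW_LAYER_GRE:
                return "MPLS-over-GRE (protocol 0x8847)";
        default:
                return "MPLS-over-IP (IPPROTO_MPLS)";
        }
}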
2832
2833 /**
2834  * Add META item to matcher and to the value.
2835  *
2836  * @param[in, out] matcher
2837  *   Flow matcher.
2838  * @param[in, out] key
2839  *   Flow matcher value.
2840  * @param[in] item
2841  *   Flow pattern to translate.
2844  */
2845 static void
2846 flow_dv_translate_item_meta(void *matcher, void *key,
2847                             const struct rte_flow_item *item)
2848 {
2849         const struct rte_flow_item_meta *meta_m;
2850         const struct rte_flow_item_meta *meta_v;
2851         void *misc2_m =
2852                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2853         void *misc2_v =
2854                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2855
2856         meta_m = (const void *)item->mask;
2857         if (!meta_m)
2858                 meta_m = &rte_flow_item_meta_mask;
2859         meta_v = (const void *)item->spec;
2860         if (meta_v) {
2861                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2862                          rte_be_to_cpu_32(meta_m->data));
2863                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2864                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
2865         }
2866 }
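
/*
 * Editor's illustration, not part of the original driver: an
 * application-side META item as consumed above; the 32-bit value is
 * matched against metadata register A. A NULL mask falls back to
 * rte_flow_item_meta_mask. The helper name is hypothetical.
 */
static __rte_unused void
flow_dv_example_meta_item(struct rte_flow_item *item,
                          struct rte_flow_item_meta *spec,
                          rte_be32_t data)
{
        memset(spec, 0, sizeof(*spec));
        spec->data = data;
        item->type = RTE_FLOW_ITEM_TYPE_META;
        item->spec = spec;
        item->last = NULL;
        item->mask = NULL; /* Use the default full mask. */
}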
2867
2868 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
2869
2870 #define HEADER_IS_ZERO(match_criteria, headers)                              \
2871         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
2872                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
2873
2874 /**
2875  * Calculate flow matcher enable bitmap.
2876  *
2877  * @param match_criteria
2878  *   Pointer to flow matcher criteria.
2879  *
2880  * @return
2881  *   Bitmap of enabled fields.
2882  */
2883 static uint8_t
2884 flow_dv_matcher_enable(uint32_t *match_criteria)
2885 {
2886         uint8_t match_criteria_enable;
2887
2888         match_criteria_enable =
2889                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2890                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2891         match_criteria_enable |=
2892                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2893                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2894         match_criteria_enable |=
2895                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2896                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2897         match_criteria_enable |=
2898                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2899                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2900 #ifdef HAVE_MLX5DV_DR
2901         match_criteria_enable |=
2902                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
2903                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
2904 #endif
2905         return match_criteria_enable;
2906 }
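
/*
 * Editor's illustration, not part of the original driver: the bitmap
 * returned above flags which fte_match_param blocks carry non-zero
 * criteria, so an all-zero criteria buffer yields 0 and matches every
 * packet. The helper name is hypothetical.
 */
static __rte_unused uint8_t
flow_dv_example_matcher_enable_empty(void)
{
        uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

        return flow_dv_matcher_enable(buf); /* 0: no block enabled. */
}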
2907
2909 /**
2910  * Get a flow table.
2911  *
2912  * @param[in, out] dev
2913  *   Pointer to rte_eth_dev structure.
2914  * @param[in] table_id
2915  *   Table id to use.
2916  * @param[in] egress
2917  *   Direction of the table.
2918  * @param[out] error
2919  *   Pointer to the error structure.
2920  *
2921  * @return
2922  *   The table resource for the given index, or NULL in case of failure.
2923  */
2924 static struct mlx5_flow_tbl_resource *
2925 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
2926                          uint32_t table_id, uint8_t egress,
2927                          struct rte_flow_error *error)
2928 {
2929         struct mlx5_priv *priv = dev->data->dev_private;
2930         struct mlx5_ibv_shared *sh = priv->sh;
2931         struct mlx5_flow_tbl_resource *tbl;
2932
2933 #ifdef HAVE_MLX5DV_DR
2934         if (egress) {
2935                 tbl = &sh->tx_tbl[table_id];
2936                 if (!tbl->obj)
2937                         tbl->obj = mlx5_glue->dr_create_flow_tbl
2938                                 (sh->tx_ns, table_id);
2939         } else {
2940                 tbl = &sh->rx_tbl[table_id];
2941                 if (!tbl->obj)
2942                         tbl->obj = mlx5_glue->dr_create_flow_tbl
2943                                 (sh->rx_ns, table_id);
2944         }
2945         if (!tbl->obj) {
2946                 rte_flow_error_set(error, ENOMEM,
2947                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2948                                    NULL, "cannot create table");
2949                 return NULL;
2950         }
2951         rte_atomic32_inc(&tbl->refcnt);
2952         return tbl;
2953 #else
2954         (void)error;
2955         (void)tbl;
2956         if (egress)
2957                 return &sh->tx_tbl[table_id];
2958         else
2959                 return &sh->rx_tbl[table_id];
2960 #endif
2961 }
2962
2963 /**
2964  * Release a flow table.
2965  *
2966  * @param[in] tbl
2967  *   Table resource to be released.
2968  *
2969  * @return
2970  *   0 if the table was released, 1 otherwise.
2971  */
2972 static int
2973 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
2974 {
2975         if (!tbl)
2976                 return 0;
2977         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
2978                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
2979                 tbl->obj = NULL;
2980                 return 0;
2981         }
2982         return 1;
2983 }
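
/*
 * Editor's note, not part of the original driver: a sketch of the
 * get/release pairing for the table refcount on HAVE_MLX5DV_DR builds.
 * Each successful flow_dv_tbl_resource_get() must be balanced by one
 * flow_dv_tbl_resource_release(); the DR object is destroyed only when
 * the last reference is dropped. The helper name is hypothetical.
 */
static __rte_unused void
flow_dv_example_tbl_refcnt(struct rte_eth_dev *dev,
                           struct rte_flow_error *error)
{
        struct mlx5_flow_tbl_resource *tbl;

        tbl = flow_dv_tbl_resource_get(dev, 0, 0, error);
        if (tbl)
                flow_dv_tbl_resource_release(tbl);
}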
2984
2985 /**
2986  * Register the flow matcher.
2987  *
2988  * @param[in, out] dev
2989  *   Pointer to rte_eth_dev structure.
2990  * @param[in, out] matcher
2991  *   Pointer to flow matcher.
2992  * @param[in, out] dev_flow
2993  *   Pointer to the dev_flow.
2994  * @param[out] error
2995  *   Pointer to the error structure.
2996  *
2997  * @return
2998  *   0 on success, a negative errno value otherwise and rte_errno is set.
2999  */
3000 static int
3001 flow_dv_matcher_register(struct rte_eth_dev *dev,
3002                          struct mlx5_flow_dv_matcher *matcher,
3003                          struct mlx5_flow *dev_flow,
3004                          struct rte_flow_error *error)
3005 {
3006         struct mlx5_priv *priv = dev->data->dev_private;
3007         struct mlx5_ibv_shared *sh = priv->sh;
3008         struct mlx5_flow_dv_matcher *cache_matcher;
3009         struct mlx5dv_flow_matcher_attr dv_attr = {
3010                 .type = IBV_FLOW_ATTR_NORMAL,
3011                 .match_mask = (void *)&matcher->mask,
3012         };
3013         struct mlx5_flow_tbl_resource *tbl = NULL;
3014
3015         /* Lookup from cache. */
3016         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3017                 if (matcher->crc == cache_matcher->crc &&
3018                     matcher->priority == cache_matcher->priority &&
3019                     matcher->egress == cache_matcher->egress &&
3020                     matcher->group == cache_matcher->group &&
3021                     !memcmp((const void *)matcher->mask.buf,
3022                             (const void *)cache_matcher->mask.buf,
3023                             cache_matcher->mask.size)) {
3024                         DRV_LOG(DEBUG,
3025                                 "priority %hd use %s matcher %p: refcnt %d++",
3026                                 cache_matcher->priority,
3027                                 cache_matcher->egress ? "tx" : "rx",
3028                                 (void *)cache_matcher,
3029                                 rte_atomic32_read(&cache_matcher->refcnt));
3030                         rte_atomic32_inc(&cache_matcher->refcnt);
3031                         dev_flow->dv.matcher = cache_matcher;
3032                         return 0;
3033                 }
3034         }
3035         /* Register new matcher. */
3036         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3037         if (!cache_matcher)
3038                 return rte_flow_error_set(error, ENOMEM,
3039                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3040                                           "cannot allocate matcher memory");
3041         tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3042                                        matcher->egress, error);
3043         if (!tbl) {
3044                 rte_free(cache_matcher);
3045                 return rte_flow_error_set(error, ENOMEM,
3046                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3047                                           NULL, "cannot create table");
3048         }
3049         *cache_matcher = *matcher;
3050         dv_attr.match_criteria_enable =
3051                 flow_dv_matcher_enable(cache_matcher->mask.buf);
3052         dv_attr.priority = matcher->priority;
3053         if (matcher->egress)
3054                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3055         cache_matcher->matcher_object =
3056                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3057         if (!cache_matcher->matcher_object) {
3058                 rte_free(cache_matcher);
3059 #ifdef HAVE_MLX5DV_DR
3060                 flow_dv_tbl_resource_release(tbl);
3061 #endif
3062                 return rte_flow_error_set(error, ENOMEM,
3063                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3064                                           NULL, "cannot create matcher");
3065         }
3066         rte_atomic32_inc(&cache_matcher->refcnt);
3067         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3068         dev_flow->dv.matcher = cache_matcher;
3069         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3070                 cache_matcher->priority,
3071                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3072                 rte_atomic32_read(&cache_matcher->refcnt));
3073         rte_atomic32_inc(&tbl->refcnt);
3074         return 0;
3075 }
3076
3077 /**
3078  * Add source vport match to the specified matcher.
3079  *
3080  * @param[in, out] matcher
3081  *   Flow matcher.
3082  * @param[in, out] key
3083  *   Flow matcher value.
3084  * @param[in] port
3085  *   Source vport value to match.
3086  * @param[in] mask
3087  *   Mask to apply to the source vport value.
3088  */
3089 static void
3090 flow_dv_translate_source_vport(void *matcher, void *key,
3091                               int16_t port, uint16_t mask)
3092 {
3093         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3094         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3095
3096         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3097         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3098 }
3099
3100 /**
3101  * Find existing tag resource or create and register a new one.
3102  *
3103  * @param[in, out] dev
3104  *   Pointer to rte_eth_dev structure.
3105  * @param[in, out] resource
3106  *   Pointer to tag resource.
3107  * @param[in, out] dev_flow
3108  *   Pointer to the dev_flow.
3109  * @param[out] error
3110  *   Pointer to the error structure.
3111  *
3112  * @return
3113  *   0 on success, a negative errno value otherwise and rte_errno is set.
3114  */
3115 static int
3116 flow_dv_tag_resource_register
3117                         (struct rte_eth_dev *dev,
3118                          struct mlx5_flow_dv_tag_resource *resource,
3119                          struct mlx5_flow *dev_flow,
3120                          struct rte_flow_error *error)
3121 {
3122         struct mlx5_priv *priv = dev->data->dev_private;
3123         struct mlx5_ibv_shared *sh = priv->sh;
3124         struct mlx5_flow_dv_tag_resource *cache_resource;
3125
3126         /* Lookup a matching resource from cache. */
3127         LIST_FOREACH(cache_resource, &sh->tags, next) {
3128                 if (resource->tag == cache_resource->tag) {
3129                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3130                                 (void *)cache_resource,
3131                                 rte_atomic32_read(&cache_resource->refcnt));
3132                         rte_atomic32_inc(&cache_resource->refcnt);
3133                         dev_flow->flow->tag_resource = cache_resource;
3134                         return 0;
3135                 }
3136         }
3137         /* Register new resource. */
3138         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3139         if (!cache_resource)
3140                 return rte_flow_error_set(error, ENOMEM,
3141                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3142                                           "cannot allocate resource memory");
3143         *cache_resource = *resource;
3144         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3145                 (resource->tag);
3146         if (!cache_resource->action) {
3147                 rte_free(cache_resource);
3148                 return rte_flow_error_set(error, ENOMEM,
3149                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3150                                           NULL, "cannot create action");
3151         }
3152         rte_atomic32_init(&cache_resource->refcnt);
3153         rte_atomic32_inc(&cache_resource->refcnt);
3154         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3155         dev_flow->flow->tag_resource = cache_resource;
3156         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3157                 (void *)cache_resource,
3158                 rte_atomic32_read(&cache_resource->refcnt));
3159         return 0;
3160 }
3161
3162 /**
3163  * Release the tag.
3164  *
3165  * @param dev
3166  *   Pointer to Ethernet device.
3167  * @param tag
3168  *   Pointer to the tag resource.
3169  *
3170  * @return
3171  *   1 while a reference on it exists, 0 when freed.
3172  */
3173 static int
3174 flow_dv_tag_release(struct rte_eth_dev *dev,
3175                     struct mlx5_flow_dv_tag_resource *tag)
3176 {
3177         assert(tag);
3178         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3179                 dev->data->port_id, (void *)tag,
3180                 rte_atomic32_read(&tag->refcnt));
3181         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3182                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3183                 LIST_REMOVE(tag, next);
3184                 DRV_LOG(DEBUG, "port %u tag %p: removed",
3185                         dev->data->port_id, (void *)tag);
3186                 rte_free(tag);
3187                 return 0;
3188         }
3189         return 1;
3190 }
3191
3192 /**
3193  * Fill the flow with DV spec.
3194  *
3195  * @param[in] dev
3196  *   Pointer to rte_eth_dev structure.
3197  * @param[in, out] dev_flow
3198  *   Pointer to the sub flow.
3199  * @param[in] attr
3200  *   Pointer to the flow attributes.
3201  * @param[in] items
3202  *   Pointer to the list of items.
3203  * @param[in] actions
3204  *   Pointer to the list of actions.
3205  * @param[out] error
3206  *   Pointer to the error structure.
3207  *
3208  * @return
3209  *   0 on success, a negative errno value otherwise and rte_errno is set.
3210  */
3211 static int
3212 flow_dv_translate(struct rte_eth_dev *dev,
3213                   struct mlx5_flow *dev_flow,
3214                   const struct rte_flow_attr *attr,
3215                   const struct rte_flow_item items[],
3216                   const struct rte_flow_action actions[],
3217                   struct rte_flow_error *error)
3218 {
3219         struct mlx5_priv *priv = dev->data->dev_private;
3220         struct rte_flow *flow = dev_flow->flow;
3221         uint64_t item_flags = 0;
3222         uint64_t last_item = 0;
3223         uint64_t action_flags = 0;
3224         uint64_t priority = attr->priority;
3225         struct mlx5_flow_dv_matcher matcher = {
3226                 .mask = {
3227                         .size = sizeof(matcher.mask.buf),
3228                 },
3229         };
3230         int actions_n = 0;
3231         bool actions_end = false;
3232         struct mlx5_flow_dv_modify_hdr_resource res = {
3233                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3234                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3235         };
3236         union flow_dv_attr flow_attr = { .attr = 0 };
3237         struct mlx5_flow_dv_tag_resource tag_resource;
3238
3239         if (priority == MLX5_FLOW_PRIO_RSVD)
3240                 priority = priv->config.flow_prio - 1;
3241         for (; !actions_end ; actions++) {
3242                 const struct rte_flow_action_queue *queue;
3243                 const struct rte_flow_action_rss *rss;
3244                 const struct rte_flow_action *action = actions;
3245                 const struct rte_flow_action_count *count = action->conf;
3246                 const uint8_t *rss_key;
3247                 const struct rte_flow_action_jump *jump_data;
3248                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3249                 struct mlx5_flow_tbl_resource *tbl;
3250
3251                 switch (actions->type) {
3252                 case RTE_FLOW_ACTION_TYPE_VOID:
3253                         break;
3254                 case RTE_FLOW_ACTION_TYPE_FLAG:
3255                         tag_resource.tag =
3256                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3257                         if (!flow->tag_resource)
3258                                 if (flow_dv_tag_resource_register
3259                                     (dev, &tag_resource, dev_flow, error))
3260                                         return -rte_errno;
3261                         dev_flow->dv.actions[actions_n++] =
3262                                 flow->tag_resource->action;
3263                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3264                         break;
3265                 case RTE_FLOW_ACTION_TYPE_MARK:
3266                         tag_resource.tag = mlx5_flow_mark_set
3267                               (((const struct rte_flow_action_mark *)
3268                                (actions->conf))->id);
3269                         if (!flow->tag_resource)
3270                                 if (flow_dv_tag_resource_register
3271                                     (dev, &tag_resource, dev_flow, error))
3272                                         return -rte_errno;
3273                         dev_flow->dv.actions[actions_n++] =
3274                                 flow->tag_resource->action;
3275                         action_flags |= MLX5_FLOW_ACTION_MARK;
3276                         break;
3277                 case RTE_FLOW_ACTION_TYPE_DROP:
3278                         action_flags |= MLX5_FLOW_ACTION_DROP;
3279                         break;
3280                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3281                         queue = actions->conf;
3282                         flow->rss.queue_num = 1;
3283                         (*flow->queue)[0] = queue->index;
3284                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3285                         break;
3286                 case RTE_FLOW_ACTION_TYPE_RSS:
3287                         rss = actions->conf;
3288                         if (flow->queue)
3289                                 memcpy((*flow->queue), rss->queue,
3290                                        rss->queue_num * sizeof(uint16_t));
3291                         flow->rss.queue_num = rss->queue_num;
3292                         /* NULL RSS key indicates default RSS key. */
3293                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
3294                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3295                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3296                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3297                         flow->rss.level = rss->level;
3298                         action_flags |= MLX5_FLOW_ACTION_RSS;
3299                         break;
3300                 case RTE_FLOW_ACTION_TYPE_COUNT:
3301                         if (!priv->config.devx) {
3302                                 rte_errno = ENOTSUP;
3303                                 goto cnt_err;
3304                         }
3305                         flow->counter = flow_dv_counter_new(dev, count->shared,
3306                                                             count->id);
3307                         if (flow->counter == NULL)
3308                                 goto cnt_err;
3309                         dev_flow->dv.actions[actions_n++] =
3310                                 flow->counter->action;
3311                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3312                         break;
3313 cnt_err:
3314                         if (rte_errno == ENOTSUP)
3315                                 return rte_flow_error_set
3316                                               (error, ENOTSUP,
3317                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3318                                                NULL,
3319                                                "count action not supported");
3320                         else
3321                                 return rte_flow_error_set
3322                                                 (error, rte_errno,
3323                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3324                                                  action,
3325                                                  "cannot create counter"
3326                                                   " object.");
3327                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3328                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3329                         if (flow_dv_create_action_l2_encap(dev, actions,
3330                                                            dev_flow, error))
3331                                 return -rte_errno;
3332                         dev_flow->dv.actions[actions_n++] =
3333                                 dev_flow->dv.encap_decap->verbs_action;
3334                         action_flags |= actions->type ==
3335                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3336                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3337                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3338                         break;
3339                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3340                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3341                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
3342                                                            error))
3343                                 return -rte_errno;
3344                         dev_flow->dv.actions[actions_n++] =
3345                                 dev_flow->dv.encap_decap->verbs_action;
3346                         action_flags |= actions->type ==
3347                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3348                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3349                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3350                         break;
3351                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3352                         /* Handle encap with preceding decap. */
3353                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3354                                 if (flow_dv_create_action_raw_encap
3355                                         (dev, actions, dev_flow, attr, error))
3356                                         return -rte_errno;
3357                                 dev_flow->dv.actions[actions_n++] =
3358                                         dev_flow->dv.encap_decap->verbs_action;
3359                         } else {
3360                                 /* Handle encap without preceding decap. */
3361                                 if (flow_dv_create_action_l2_encap(dev, actions,
3362                                                                    dev_flow,
3363                                                                    error))
3364                                         return -rte_errno;
3365                                 dev_flow->dv.actions[actions_n++] =
3366                                         dev_flow->dv.encap_decap->verbs_action;
3367                         }
3368                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3369                         break;
3370                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3371                         /* Check if this decap is followed by encap. */
3372                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3373                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3374                                action++) {
3375                         }
3376                         /* Handle decap only if it isn't followed by encap. */
3377                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3378                                 if (flow_dv_create_action_l2_decap(dev,
3379                                                                    dev_flow,
3380                                                                    error))
3381                                         return -rte_errno;
3382                                 dev_flow->dv.actions[actions_n++] =
3383                                         dev_flow->dv.encap_decap->verbs_action;
3384                         }
3385                         /* If decap is followed by encap, handle it at encap. */
3386                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3387                         break;
3388                 case RTE_FLOW_ACTION_TYPE_JUMP:
3389                         jump_data = action->conf;
3390                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3391                                                        MLX5_GROUP_FACTOR,
3392                                                        attr->egress, error);
3393                         if (!tbl)
3394                                 return rte_flow_error_set
3395                                                 (error, errno,
3396                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3397                                                  NULL,
3398                                                  "cannot create jump action.");
3399                         jump_tbl_resource.tbl = tbl;
3400                         if (flow_dv_jump_tbl_resource_register
3401                             (dev, &jump_tbl_resource, dev_flow, error)) {
3402                                 flow_dv_tbl_resource_release(tbl);
3403                                 return rte_flow_error_set
3404                                                 (error, errno,
3405                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3406                                                  NULL,
3407                                                  "cannot create jump action.");
3408                         }
3409                         dev_flow->dv.actions[actions_n++] =
3410                                 dev_flow->dv.jump->action;
3411                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3412                         break;
3413                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3414                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3415                         if (flow_dv_convert_action_modify_mac(&res, actions,
3416                                                               error))
3417                                 return -rte_errno;
3418                         action_flags |= actions->type ==
3419                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3420                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
3421                                         MLX5_FLOW_ACTION_SET_MAC_DST;
3422                         break;
3423                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3424                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3425                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
3426                                                                error))
3427                                 return -rte_errno;
3428                         action_flags |= actions->type ==
3429                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3430                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
3431                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
3432                         break;
3433                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3434                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3435                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
3436                                                                error))
3437                                 return -rte_errno;
3438                         action_flags |= actions->type ==
3439                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3440                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
3441                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
3442                         break;
3443                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3444                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3445                         if (flow_dv_convert_action_modify_tp(&res, actions,
3446                                                              items, &flow_attr,
3447                                                              error))
3448                                 return -rte_errno;
3449                         action_flags |= actions->type ==
3450                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3451                                         MLX5_FLOW_ACTION_SET_TP_SRC :
3452                                         MLX5_FLOW_ACTION_SET_TP_DST;
3453                         break;
3454                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3455                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3456                                                                   &flow_attr,
3457                                                                   error))
3458                                 return -rte_errno;
3459                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3460                         break;
3461                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3462                         if (flow_dv_convert_action_modify_ttl(&res, actions,
3463                                                              items, &flow_attr,
3464                                                              error))
3465                                 return -rte_errno;
3466                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3467                         break;
3468                 case RTE_FLOW_ACTION_TYPE_END:
3469                         actions_end = true;
3470                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3471                                 /* create modify action if needed. */
3472                                 if (flow_dv_modify_hdr_resource_register
3473                                                                 (dev, &res,
3474                                                                  dev_flow,
3475                                                                  error))
3476                                         return -rte_errno;
3477                                 dev_flow->dv.actions[actions_n++] =
3478                                         dev_flow->dv.modify_hdr->verbs_action;
3479                         }
3480                         break;
3481                 default:
3482                         break;
3483                 }
3484         }
3485         dev_flow->dv.actions_n = actions_n;
3486         flow->actions = action_flags;
3487         if (attr->ingress && !attr->transfer &&
3488             (priv->representor || priv->master)) {
3489                 /* It was validated - we support unidirectional flows only. */
3490                 assert(!attr->egress);
3491                 /*
3492                  * Add matching on source vport index only
3493                  * for ingress rules in E-Switch configurations.
3494                  */
3495                 flow_dv_translate_source_vport(matcher.mask.buf,
3496                                                dev_flow->dv.value.buf,
3497                                                priv->vport_id,
3498                                                0xffff);
3499         }
3500         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3501                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3502                 void *match_mask = matcher.mask.buf;
3503                 void *match_value = dev_flow->dv.value.buf;
3504
3505                 switch (items->type) {
3506                 case RTE_FLOW_ITEM_TYPE_ETH:
3507                         flow_dv_translate_item_eth(match_mask, match_value,
3508                                                    items, tunnel);
3509                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3510                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3511                                              MLX5_FLOW_LAYER_OUTER_L2;
3512                         break;
3513                 case RTE_FLOW_ITEM_TYPE_VLAN:
3514                         flow_dv_translate_item_vlan(match_mask, match_value,
3515                                                     items, tunnel);
3516                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3517                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3518                                               MLX5_FLOW_LAYER_INNER_VLAN) :
3519                                              (MLX5_FLOW_LAYER_OUTER_L2 |
3520                                               MLX5_FLOW_LAYER_OUTER_VLAN);
3521                         break;
3522                 case RTE_FLOW_ITEM_TYPE_IPV4:
3523                         flow_dv_translate_item_ipv4(match_mask, match_value,
3524                                                     items, tunnel, attr->group);
3525                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3526                         dev_flow->dv.hash_fields |=
3527                                 mlx5_flow_hashfields_adjust
3528                                         (dev_flow, tunnel,
3529                                          MLX5_IPV4_LAYER_TYPES,
3530                                          MLX5_IPV4_IBV_RX_HASH);
3531                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3532                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3533                         break;
3534                 case RTE_FLOW_ITEM_TYPE_IPV6:
3535                         flow_dv_translate_item_ipv6(match_mask, match_value,
3536                                                     items, tunnel, attr->group);
3537                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3538                         dev_flow->dv.hash_fields |=
3539                                 mlx5_flow_hashfields_adjust
3540                                         (dev_flow, tunnel,
3541                                          MLX5_IPV6_LAYER_TYPES,
3542                                          MLX5_IPV6_IBV_RX_HASH);
3543                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3544                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3545                         break;
3546                 case RTE_FLOW_ITEM_TYPE_TCP:
3547                         flow_dv_translate_item_tcp(match_mask, match_value,
3548                                                    items, tunnel);
3549                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3550                         dev_flow->dv.hash_fields |=
3551                                 mlx5_flow_hashfields_adjust
3552                                         (dev_flow, tunnel, ETH_RSS_TCP,
3553                                          IBV_RX_HASH_SRC_PORT_TCP |
3554                                          IBV_RX_HASH_DST_PORT_TCP);
3555                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3556                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3557                         break;
3558                 case RTE_FLOW_ITEM_TYPE_UDP:
3559                         flow_dv_translate_item_udp(match_mask, match_value,
3560                                                    items, tunnel);
3561                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3562                         dev_flow->dv.hash_fields |=
3563                                 mlx5_flow_hashfields_adjust
3564                                         (dev_flow, tunnel, ETH_RSS_UDP,
3565                                          IBV_RX_HASH_SRC_PORT_UDP |
3566                                          IBV_RX_HASH_DST_PORT_UDP);
3567                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3568                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3569                         break;
3570                 case RTE_FLOW_ITEM_TYPE_GRE:
3571                         flow_dv_translate_item_gre(match_mask, match_value,
3572                                                    items, tunnel);
3573                         last_item = MLX5_FLOW_LAYER_GRE;
3574                         break;
3575                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3576                         flow_dv_translate_item_nvgre(match_mask, match_value,
3577                                                      items, tunnel);
3578                         last_item = MLX5_FLOW_LAYER_GRE;
3579                         break;
3580                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3581                         flow_dv_translate_item_vxlan(match_mask, match_value,
3582                                                      items, tunnel);
3583                         last_item = MLX5_FLOW_LAYER_VXLAN;
3584                         break;
3585                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3586                         flow_dv_translate_item_vxlan(match_mask, match_value,
3587                                                      items, tunnel);
3588                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3589                         break;
3590                 case RTE_FLOW_ITEM_TYPE_MPLS:
3591                         flow_dv_translate_item_mpls(match_mask, match_value,
3592                                                     items, last_item, tunnel);
3593                         last_item = MLX5_FLOW_LAYER_MPLS;
3594                         break;
3595                 case RTE_FLOW_ITEM_TYPE_META:
3596                         flow_dv_translate_item_meta(match_mask, match_value,
3597                                                     items);
3598                         last_item = MLX5_FLOW_ITEM_METADATA;
3599                         break;
3600                 default:
3601                         break;
3602                 }
3603                 item_flags |= last_item;
3604         }
3605         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3606                                          dev_flow->dv.value.buf));
3607         dev_flow->layers = item_flags;
3608         /* Register matcher. */
3609         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3610                                     matcher.mask.size);
3611         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3612                                                      matcher.priority);
3613         matcher.egress = attr->egress;
3614         matcher.group = attr->group;
3615         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
3616                 return -rte_errno;
3617         return 0;
3618 }
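
/*
 * Editor's illustration, not part of the original driver: the minimal
 * shape of the items[]/actions[] arrays flow_dv_translate() walks — an
 * outer ETH/IPV4/UDP pattern steered to queue 0, both lists terminated
 * by an END entry. The helper name is hypothetical.
 */
static __rte_unused void
flow_dv_example_pattern(struct rte_flow_item items[4],
                        struct rte_flow_action actions[2],
                        struct rte_flow_action_queue *queue)
{
        memset(items, 0, 4 * sizeof(*items));
        items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
        items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
        items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
        items[3].type = RTE_FLOW_ITEM_TYPE_END;
        memset(actions, 0, 2 * sizeof(*actions));
        queue->index = 0;
        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = queue;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;
}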
3619
3620 /**
3621  * Apply the flow to the NIC.
3622  *
3623  * @param[in] dev
3624  *   Pointer to the Ethernet device structure.
3625  * @param[in, out] flow
3626  *   Pointer to flow structure.
3627  * @param[out] error
3628  *   Pointer to error structure.
3629  *
3630  * @return
3631  *   0 on success, a negative errno value otherwise and rte_errno is set.
3632  */
3633 static int
3634 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3635               struct rte_flow_error *error)
3636 {
3637         struct mlx5_flow_dv *dv;
3638         struct mlx5_flow *dev_flow;
3639         int n;
3640         int err;
3641
3642         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3643                 dv = &dev_flow->dv;
3644                 n = dv->actions_n;
3645                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3646                         dv->hrxq = mlx5_hrxq_drop_new(dev);
3647                         if (!dv->hrxq) {
3648                                 rte_flow_error_set
3649                                         (error, errno,
3650                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3651                                          "cannot get drop hash queue");
3652                                 goto error;
3653                         }
3654                         dv->actions[n++] =
3655                                 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3656                                 (dv->hrxq->qp);
3657                 } else if (flow->actions &
3658                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3659                         struct mlx5_hrxq *hrxq;
3660
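                             /*
                              * Reuse a hash Rx queue already matching the
                              * RSS key, hash fields and queue list, or
                              * create one. The last mlx5_hrxq_new() argument
                              * requests RSS on the inner packet for
                              * tunnelled flows.
                              */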
3661                         hrxq = mlx5_hrxq_get(dev, flow->key,
3662                                              MLX5_RSS_HASH_KEY_LEN,
3663                                              dv->hash_fields,
3664                                              (*flow->queue),
3665                                              flow->rss.queue_num);
3666                         if (!hrxq)
3667                                 hrxq = mlx5_hrxq_new
3668                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3669                                          dv->hash_fields, (*flow->queue),
3670                                          flow->rss.queue_num,
3671                                          !!(dev_flow->layers &
3672                                             MLX5_FLOW_LAYER_TUNNEL));
3673                         if (!hrxq) {
3674                                 rte_flow_error_set
3675                                         (error, rte_errno,
3676                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3677                                          "cannot get hash queue");
3678                                 goto error;
3679                         }
3680                         dv->hrxq = hrxq;
3681                         dv->actions[n++] =
3682                                 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3683                                 (dv->hrxq->qp);
3684                 }
3685                 dv->flow =
3686                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3687                                                   (void *)&dv->value, n,
3688                                                   dv->actions);
3689                 if (!dv->flow) {
3690                         rte_flow_error_set(error, errno,
3691                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3692                                            NULL,
3693                                            "hardware refuses to create flow");
3694                         goto error;
3695                 }
3696         }
3697         return 0;
3698 error:
3699         err = rte_errno; /* Save rte_errno before cleanup. */
3700         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3701                 struct mlx5_flow_dv *dv = &dev_flow->dv;
3702                 if (dv->hrxq) {
3703                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
3704                                 mlx5_hrxq_drop_release(dev);
3705                         else
3706                                 mlx5_hrxq_release(dev, dv->hrxq);
3707                         dv->hrxq = NULL;
3708                 }
3709         }
3710         rte_errno = err; /* Restore rte_errno. */
3711         return -rte_errno;
3712 }
3713
3714 /**
3715  * Release the flow matcher.
3716  *
3717  * @param dev
3718  *   Pointer to Ethernet device.
3719  * @param flow
3720  *   Pointer to mlx5_flow.
3721  *
3722  * @return
3723  *   1 while a reference on it exists, 0 when freed.
3724  */
3725 static int
3726 flow_dv_matcher_release(struct rte_eth_dev *dev,
3727                         struct mlx5_flow *flow)
3728 {
3729         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3730         struct mlx5_priv *priv = dev->data->dev_private;
3731         struct mlx5_ibv_shared *sh = priv->sh;
3732         struct mlx5_flow_tbl_resource *tbl;
3733
3734         assert(matcher->matcher_object);
3735         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3736                 dev->data->port_id, (void *)matcher,
3737                 rte_atomic32_read(&matcher->refcnt));
3738         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3739                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3740                            (matcher->matcher_object));
3741                 LIST_REMOVE(matcher, next);
3742                 if (matcher->egress)
3743                         tbl = &sh->tx_tbl[matcher->group];
3744                 else
3745                         tbl = &sh->rx_tbl[matcher->group];
3746                 flow_dv_tbl_resource_release(tbl);
3747                 rte_free(matcher);
3748                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3749                         dev->data->port_id, (void *)matcher);
3750                 return 0;
3751         }
3752         return 1;
3753 }
3754
3755 /**
3756  * Release an encap/decap resource.
3757  *
3758  * @param flow
3759  *   Pointer to mlx5_flow.
3760  *
3761  * @return
3762  *   1 while a reference on it exists, 0 when freed.
3763  */
3764 static int
3765 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3766 {
3767         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3768                                                 flow->dv.encap_decap;
3769
3770         assert(cache_resource->verbs_action);
3771         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3772                 (void *)cache_resource,
3773                 rte_atomic32_read(&cache_resource->refcnt));
3774         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3775                 claim_zero(mlx5_glue->destroy_flow_action
3776                                 (cache_resource->verbs_action));
3777                 LIST_REMOVE(cache_resource, next);
3778                 rte_free(cache_resource);
3779                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3780                         (void *)cache_resource);
3781                 return 0;
3782         }
3783         return 1;
3784 }
3785
3786 /**
3787  * Release a jump-to-table action resource.
3788  *
3789  * @param flow
3790  *   Pointer to mlx5_flow.
3791  *
3792  * @return
3793  *   1 while a reference on it exists, 0 when freed.
3794  */
3795 static int
3796 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
3797 {
3798         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
3799                                                 flow->dv.jump;
3800
3801         assert(cache_resource->action);
3802         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
3803                 (void *)cache_resource,
3804                 rte_atomic32_read(&cache_resource->refcnt));
3805         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3806                 claim_zero(mlx5_glue->destroy_flow_action
3807                                 (cache_resource->action));
3808                 LIST_REMOVE(cache_resource, next);
3809                 flow_dv_tbl_resource_release(cache_resource->tbl);
3810                 rte_free(cache_resource);
3811                 DRV_LOG(DEBUG, "jump table resource %p: removed",
3812                         (void *)cache_resource);
3813                 return 0;
3814         }
3815         return 1;
3816 }
3817
3818 /**
3819  * Release a modify-header resource.
3820  *
3821  * @param flow
3822  *   Pointer to mlx5_flow.
3823  *
3824  * @return
3825  *   1 while a reference on it exists, 0 when freed.
3826  */
3827 static int
3828 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3829 {
3830         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3831                                                 flow->dv.modify_hdr;
3832
3833         assert(cache_resource->verbs_action);
3834         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3835                 (void *)cache_resource,
3836                 rte_atomic32_read(&cache_resource->refcnt));
3837         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3838                 claim_zero(mlx5_glue->destroy_flow_action
3839                                 (cache_resource->verbs_action));
3840                 LIST_REMOVE(cache_resource, next);
3841                 rte_free(cache_resource);
3842                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3843                         (void *)cache_resource);
3844                 return 0;
3845         }
3846         return 1;
3847 }
3848
3849 /**
3850  * Remove the flow from the NIC but keep it in memory.
3851  *
3852  * @param[in] dev
3853  *   Pointer to Ethernet device.
3854  * @param[in, out] flow
3855  *   Pointer to flow structure.
3856  */
3857 static void
3858 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3859 {
3860         struct mlx5_flow_dv *dv;
3861         struct mlx5_flow *dev_flow;
3862
3863         if (!flow)
3864                 return;
3865         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3866                 dv = &dev_flow->dv;
3867                 if (dv->flow) {
3868                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
3869                         dv->flow = NULL;
3870                 }
3871                 if (dv->hrxq) {
3872                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
3873                                 mlx5_hrxq_drop_release(dev);
3874                         else
3875                                 mlx5_hrxq_release(dev, dv->hrxq);
3876                         dv->hrxq = NULL;
3877                 }
3878         }
3879 }
3880
3881 /**
3882  * Remove the flow from the NIC and release its memory.
3883  *
3884  * @param[in] dev
3885  *   Pointer to the Ethernet device structure.
3886  * @param[in, out] flow
3887  *   Pointer to flow structure.
3888  */
3889 static void
3890 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3891 {
3892         struct mlx5_flow *dev_flow;
3893
3894         if (!flow)
3895                 return;
3896         flow_dv_remove(dev, flow);
3897         if (flow->counter) {
3898                 flow_dv_counter_release(flow->counter);
3899                 flow->counter = NULL;
3900         }
3901         if (flow->tag_resource) {
3902                 flow_dv_tag_release(dev, flow->tag_resource);
3903                 flow->tag_resource = NULL;
3904         }
3905         while (!LIST_EMPTY(&flow->dev_flows)) {
3906                 dev_flow = LIST_FIRST(&flow->dev_flows);
3907                 LIST_REMOVE(dev_flow, next);
3908                 if (dev_flow->dv.matcher)
3909                         flow_dv_matcher_release(dev, dev_flow);
3910                 if (dev_flow->dv.encap_decap)
3911                         flow_dv_encap_decap_resource_release(dev_flow);
3912                 if (dev_flow->dv.modify_hdr)
3913                         flow_dv_modify_hdr_resource_release(dev_flow);
3914                 if (dev_flow->dv.jump)
3915                         flow_dv_jump_tbl_resource_release(dev_flow);
3916                 rte_free(dev_flow);
3917         }
3918 }
3919
3920 /**
3921  * Query a DV flow rule for its statistics via DevX.
3922  *
3923  * @param[in] dev
3924  *   Pointer to Ethernet device.
3925  * @param[in] flow
3926  *   Pointer to the sub-flow.
3927  * @param[out] data
3928  *   Data retrieved by the query.
3929  * @param[out] error
3930  *   Perform verbose error reporting if not NULL.
3931  *
3932  * @return
3933  *   0 on success, a negative errno value otherwise and rte_errno is set.
3934  */
3935 static int
3936 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
3937                     void *data, struct rte_flow_error *error)
3938 {
3939         struct mlx5_priv *priv = dev->data->dev_private;
3940         struct rte_flow_query_count *qc = data;
3941         uint64_t pkts = 0;
3942         uint64_t bytes = 0;
3943         int err;
3944
3945         if (!priv->config.devx)
3946                 return rte_flow_error_set(error, ENOTSUP,
3947                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3948                                           NULL,
3949                                           "counters are not supported");
3950         if (flow->counter) {
3951                 err = mlx5_devx_cmd_flow_counter_query
3952                                                 (flow->counter->dcs,
3953                                                  qc->reset, &pkts, &bytes);
3954                 if (err)
3955                         return rte_flow_error_set
3956                                 (error, err,
3957                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3958                                  NULL,
3959                                  "cannot read counters");
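                     /*
                      * Hardware counters are cumulative; report values
                      * relative to the snapshot taken at the last reset.
                      */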
3960                 qc->hits_set = 1;
3961                 qc->bytes_set = 1;
3962                 qc->hits = pkts - flow->counter->hits;
3963                 qc->bytes = bytes - flow->counter->bytes;
3964                 if (qc->reset) {
3965                         flow->counter->hits = pkts;
3966                         flow->counter->bytes = bytes;
3967                 }
3968                 return 0;
3969         }
3970         return rte_flow_error_set(error, EINVAL,
3971                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3972                                   NULL,
3973                                   "counters are not available");
3974 }
3975
3976 /**
3977  * Query a flow.
3978  *
3979  * @see rte_flow_query()
3980  * @see rte_flow_ops
3981  */
3982 static int
3983 flow_dv_query(struct rte_eth_dev *dev,
3984               struct rte_flow *flow,
3985               const struct rte_flow_action *actions,
3986               void *data,
3987               struct rte_flow_error *error)
3988 {
3989         int ret = -EINVAL;
3990
3991         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3992                 switch (actions->type) {
3993                 case RTE_FLOW_ACTION_TYPE_VOID:
3994                         break;
3995                 case RTE_FLOW_ACTION_TYPE_COUNT:
3996                         ret = flow_dv_query_count(dev, flow, data, error);
3997                         break;
3998                 default:
3999                         return rte_flow_error_set(error, ENOTSUP,
4000                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4001                                                   actions,
4002                                                   "action not supported");
4003                 }
4004         }
4005         return ret;
4006 }
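
     /*
      * Caller-side usage sketch (illustrative only), going through the
      * public rte_flow API with an END-terminated action array as built
      * e.g. by testpmd:
      *
      *     struct rte_flow_query_count qc = { .reset = 1 };
      *     const struct rte_flow_action actions[] = {
      *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
      *             { .type = RTE_FLOW_ACTION_TYPE_END },
      *     };
      *     struct rte_flow_error err;
      *
      *     if (rte_flow_query(port_id, flow, actions, &qc, &err) == 0)
      *             qc.hits and qc.bytes hold the delta since last reset.
      */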
4007
4008 /*
4009  * Mutex-protected thunk to flow_dv_translate(); the lock serializes
      * DV resource updates when several ports share one Infiniband context.
4010  */
4011 static int
4012 flow_d_translate(struct rte_eth_dev *dev,
4013                  struct mlx5_flow *dev_flow,
4014                  const struct rte_flow_attr *attr,
4015                  const struct rte_flow_item items[],
4016                  const struct rte_flow_action actions[],
4017                  struct rte_flow_error *error)
4018 {
4019         int ret;
4020
4021         flow_d_shared_lock(dev);
4022         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4023         flow_d_shared_unlock(dev);
4024         return ret;
4025 }
4026
4027 /*
4028  * Mutex-protected thunk to flow_dv_apply().
4029  */
4030 static int
4031 flow_d_apply(struct rte_eth_dev *dev,
4032              struct rte_flow *flow,
4033              struct rte_flow_error *error)
4034 {
4035         int ret;
4036
4037         flow_d_shared_lock(dev);
4038         ret = flow_dv_apply(dev, flow, error);
4039         flow_d_shared_unlock(dev);
4040         return ret;
4041 }
4042
4043 /*
4044  * Mutex-protected thunk to flow_dv_remove().
4045  */
4046 static void
4047 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4048 {
4049         flow_d_shared_lock(dev);
4050         flow_dv_remove(dev, flow);
4051         flow_d_shared_unlock(dev);
4052 }
4053
4054 /*
4055  * Mutex-protected thunk to flow_dv_destroy().
4056  */
4057 static void
4058 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4059 {
4060         flow_d_shared_lock(dev);
4061         flow_dv_destroy(dev, flow);
4062         flow_d_shared_unlock(dev);
4063 }
4064
4065 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
4066         .validate = flow_dv_validate,
4067         .prepare = flow_dv_prepare,
4068         .translate = flow_d_translate,
4069         .apply = flow_d_apply,
4070         .remove = flow_d_remove,
4071         .destroy = flow_d_destroy,
4072         .query = flow_dv_query,
4073 };
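
     /*
      * This table is returned by the generic mlx5 flow layer (mlx5_flow.c)
      * when the DV flow engine is enabled via the dv_flow_en devarg.
      */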
4074
4075 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */