net/mlx5: support modify header using Direct Verbs
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_flow.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
27 #include <rte_ip.h>
28 #include <rte_gre.h>
29
30 #include "mlx5.h"
31 #include "mlx5_defs.h"
32 #include "mlx5_prm.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
35
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37
/*
 * Summary of the L3/L4 protocols present in a flow pattern. Used to
 * resolve modify-header actions (set TP/TTL) that depend on which
 * headers the flow actually matches.
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1;	/* Set once filled in from the items list. */
		uint32_t ipv4:1;	/* Pattern contains an IPv4 item. */
		uint32_t ipv6:1;	/* Pattern contains an IPv6 item. */
		uint32_t tcp:1;		/* Pattern contains a TCP item. */
		uint32_t udp:1;		/* Pattern contains a UDP item. */
		uint32_t reserved:27;
	};
	uint32_t attr;	/* All flags viewed as one word. */
};
49
50 /**
51  * Initialize flow attributes structure according to flow items' types.
52  *
53  * @param[in] item
54  *   Pointer to item specification.
55  * @param[out] attr
56  *   Pointer to flow attributes structure.
57  */
58 static void
59 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
60 {
61         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
62                 switch (item->type) {
63                 case RTE_FLOW_ITEM_TYPE_IPV4:
64                         attr->ipv4 = 1;
65                         break;
66                 case RTE_FLOW_ITEM_TYPE_IPV6:
67                         attr->ipv6 = 1;
68                         break;
69                 case RTE_FLOW_ITEM_TYPE_UDP:
70                         attr->udp = 1;
71                         break;
72                 case RTE_FLOW_ITEM_TYPE_TCP:
73                         attr->tcp = 1;
74                         break;
75                 default:
76                         break;
77                 }
78         }
79         attr->valid = 1;
80 }
81
/* Description of one header field a modification command may rewrite. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* Device identifier of the field. */
};
87
/* Modifiable Ethernet header fields; table terminated by a zero entry. */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};
95
/* Modifiable IPv4 header fields; table terminated by a zero entry. */
struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};
102
/*
 * Modifiable IPv6 header fields; 128-bit addresses are split into four
 * 32-bit device fields each. Table terminated by a zero entry.
 */
struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};
115
/* Modifiable UDP header fields; table terminated by a zero entry. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};
121
/* Modifiable TCP header fields; table terminated by a zero entry. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{0, 0, 0},
};
127
/**
 * Convert modify-header action to DV specification.
 *
 * Walks the field table and, for every field whose mask segment in
 * item->mask is non-zero, appends one modification command carrying the
 * corresponding bytes of item->spec to the resource.
 *
 * @param[in] item
 *   Pointer to item specification; spec holds the values to write,
 *   mask selects which fields get a command.
 * @param[in] field
 *   Pointer to field modification information (zero-size terminated).
 * @param[in,out] resource
 *   Pointer to the modify-header resource; actions_num is advanced as
 *   commands are appended.
 * @param[in] type
 *   Type of modification (set or add command).
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type,
			      struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	const uint8_t *spec = item->spec;
	const uint8_t *mask = item->mask;
	uint32_t set;

	while (field->size) {
		set = 0;
		/* Generate modify command for each mask segment. */
		memcpy(&set, &mask[field->offset], field->size);
		if (set) {
			if (i >= MLX5_MODIFY_NUM)
				return rte_flow_error_set(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					 "too many items to modify");
			actions[i].action_type = type;
			actions[i].field = field->id;
			/* Length is in bits; a 4-byte field is encoded as 0. */
			actions[i].length = field->size ==
					4 ? 0 : field->size * 8;
			/* Right-align the value inside the 4-byte data word. */
			rte_memcpy(&actions[i].data[4 - field->size],
				   &spec[field->offset], field->size);
			/* Command words are big-endian on the wire. */
			actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
			++i;
		}
		if (resource->actions_num != i)
			resource->actions_num = i;
		field++;
	}
	/* An all-zero mask produces no commands - reject it. */
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	return 0;
}
186
187 /**
188  * Convert modify-header set IPv4 address action to DV specification.
189  *
190  * @param[in,out] resource
191  *   Pointer to the modify-header resource.
192  * @param[in] action
193  *   Pointer to action specification.
194  * @param[out] error
195  *   Pointer to the error structure.
196  *
197  * @return
198  *   0 on success, a negative errno value otherwise and rte_errno is set.
199  */
200 static int
201 flow_dv_convert_action_modify_ipv4
202                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
203                          const struct rte_flow_action *action,
204                          struct rte_flow_error *error)
205 {
206         const struct rte_flow_action_set_ipv4 *conf =
207                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
208         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
209         struct rte_flow_item_ipv4 ipv4;
210         struct rte_flow_item_ipv4 ipv4_mask;
211
212         memset(&ipv4, 0, sizeof(ipv4));
213         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
214         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
215                 ipv4.hdr.src_addr = conf->ipv4_addr;
216                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
217         } else {
218                 ipv4.hdr.dst_addr = conf->ipv4_addr;
219                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
220         }
221         item.spec = &ipv4;
222         item.mask = &ipv4_mask;
223         return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
224                                              MLX5_MODIFICATION_TYPE_SET, error);
225 }
226
227 /**
228  * Convert modify-header set IPv6 address action to DV specification.
229  *
230  * @param[in,out] resource
231  *   Pointer to the modify-header resource.
232  * @param[in] action
233  *   Pointer to action specification.
234  * @param[out] error
235  *   Pointer to the error structure.
236  *
237  * @return
238  *   0 on success, a negative errno value otherwise and rte_errno is set.
239  */
240 static int
241 flow_dv_convert_action_modify_ipv6
242                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
243                          const struct rte_flow_action *action,
244                          struct rte_flow_error *error)
245 {
246         const struct rte_flow_action_set_ipv6 *conf =
247                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
248         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
249         struct rte_flow_item_ipv6 ipv6;
250         struct rte_flow_item_ipv6 ipv6_mask;
251
252         memset(&ipv6, 0, sizeof(ipv6));
253         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
254         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
255                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
256                        sizeof(ipv6.hdr.src_addr));
257                 memcpy(&ipv6_mask.hdr.src_addr,
258                        &rte_flow_item_ipv6_mask.hdr.src_addr,
259                        sizeof(ipv6.hdr.src_addr));
260         } else {
261                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
262                        sizeof(ipv6.hdr.dst_addr));
263                 memcpy(&ipv6_mask.hdr.dst_addr,
264                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
265                        sizeof(ipv6.hdr.dst_addr));
266         }
267         item.spec = &ipv6;
268         item.mask = &ipv6_mask;
269         return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
270                                              MLX5_MODIFICATION_TYPE_SET, error);
271 }
272
273 /**
274  * Convert modify-header set MAC address action to DV specification.
275  *
276  * @param[in,out] resource
277  *   Pointer to the modify-header resource.
278  * @param[in] action
279  *   Pointer to action specification.
280  * @param[out] error
281  *   Pointer to the error structure.
282  *
283  * @return
284  *   0 on success, a negative errno value otherwise and rte_errno is set.
285  */
286 static int
287 flow_dv_convert_action_modify_mac
288                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
289                          const struct rte_flow_action *action,
290                          struct rte_flow_error *error)
291 {
292         const struct rte_flow_action_set_mac *conf =
293                 (const struct rte_flow_action_set_mac *)(action->conf);
294         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
295         struct rte_flow_item_eth eth;
296         struct rte_flow_item_eth eth_mask;
297
298         memset(&eth, 0, sizeof(eth));
299         memset(&eth_mask, 0, sizeof(eth_mask));
300         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
301                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
302                        sizeof(eth.src.addr_bytes));
303                 memcpy(&eth_mask.src.addr_bytes,
304                        &rte_flow_item_eth_mask.src.addr_bytes,
305                        sizeof(eth_mask.src.addr_bytes));
306         } else {
307                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
308                        sizeof(eth.dst.addr_bytes));
309                 memcpy(&eth_mask.dst.addr_bytes,
310                        &rte_flow_item_eth_mask.dst.addr_bytes,
311                        sizeof(eth_mask.dst.addr_bytes));
312         }
313         item.spec = &eth;
314         item.mask = &eth_mask;
315         return flow_dv_convert_modify_action(&item, modify_eth, resource,
316                                              MLX5_MODIFICATION_TYPE_SET, error);
317 }
318
319 /**
320  * Convert modify-header set TP action to DV specification.
321  *
322  * @param[in,out] resource
323  *   Pointer to the modify-header resource.
324  * @param[in] action
325  *   Pointer to action specification.
326  * @param[in] items
327  *   Pointer to rte_flow_item objects list.
328  * @param[in] attr
329  *   Pointer to flow attributes structure.
330  * @param[out] error
331  *   Pointer to the error structure.
332  *
333  * @return
334  *   0 on success, a negative errno value otherwise and rte_errno is set.
335  */
336 static int
337 flow_dv_convert_action_modify_tp
338                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
339                          const struct rte_flow_action *action,
340                          const struct rte_flow_item *items,
341                          union flow_dv_attr *attr,
342                          struct rte_flow_error *error)
343 {
344         const struct rte_flow_action_set_tp *conf =
345                 (const struct rte_flow_action_set_tp *)(action->conf);
346         struct rte_flow_item item;
347         struct rte_flow_item_udp udp;
348         struct rte_flow_item_udp udp_mask;
349         struct rte_flow_item_tcp tcp;
350         struct rte_flow_item_tcp tcp_mask;
351         struct field_modify_info *field;
352
353         if (!attr->valid)
354                 flow_dv_attr_init(items, attr);
355         if (attr->udp) {
356                 memset(&udp, 0, sizeof(udp));
357                 memset(&udp_mask, 0, sizeof(udp_mask));
358                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
359                         udp.hdr.src_port = conf->port;
360                         udp_mask.hdr.src_port =
361                                         rte_flow_item_udp_mask.hdr.src_port;
362                 } else {
363                         udp.hdr.dst_port = conf->port;
364                         udp_mask.hdr.dst_port =
365                                         rte_flow_item_udp_mask.hdr.dst_port;
366                 }
367                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
368                 item.spec = &udp;
369                 item.mask = &udp_mask;
370                 field = modify_udp;
371         }
372         if (attr->tcp) {
373                 memset(&tcp, 0, sizeof(tcp));
374                 memset(&tcp_mask, 0, sizeof(tcp_mask));
375                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
376                         tcp.hdr.src_port = conf->port;
377                         tcp_mask.hdr.src_port =
378                                         rte_flow_item_tcp_mask.hdr.src_port;
379                 } else {
380                         tcp.hdr.dst_port = conf->port;
381                         tcp_mask.hdr.dst_port =
382                                         rte_flow_item_tcp_mask.hdr.dst_port;
383                 }
384                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
385                 item.spec = &tcp;
386                 item.mask = &tcp_mask;
387                 field = modify_tcp;
388         }
389         return flow_dv_convert_modify_action(&item, field, resource,
390                                              MLX5_MODIFICATION_TYPE_SET, error);
391 }
392
393 /**
394  * Convert modify-header set TTL action to DV specification.
395  *
396  * @param[in,out] resource
397  *   Pointer to the modify-header resource.
398  * @param[in] action
399  *   Pointer to action specification.
400  * @param[in] items
401  *   Pointer to rte_flow_item objects list.
402  * @param[in] attr
403  *   Pointer to flow attributes structure.
404  * @param[out] error
405  *   Pointer to the error structure.
406  *
407  * @return
408  *   0 on success, a negative errno value otherwise and rte_errno is set.
409  */
410 static int
411 flow_dv_convert_action_modify_ttl
412                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
413                          const struct rte_flow_action *action,
414                          const struct rte_flow_item *items,
415                          union flow_dv_attr *attr,
416                          struct rte_flow_error *error)
417 {
418         const struct rte_flow_action_set_ttl *conf =
419                 (const struct rte_flow_action_set_ttl *)(action->conf);
420         struct rte_flow_item item;
421         struct rte_flow_item_ipv4 ipv4;
422         struct rte_flow_item_ipv4 ipv4_mask;
423         struct rte_flow_item_ipv6 ipv6;
424         struct rte_flow_item_ipv6 ipv6_mask;
425         struct field_modify_info *field;
426
427         if (!attr->valid)
428                 flow_dv_attr_init(items, attr);
429         if (attr->ipv4) {
430                 memset(&ipv4, 0, sizeof(ipv4));
431                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
432                 ipv4.hdr.time_to_live = conf->ttl_value;
433                 ipv4_mask.hdr.time_to_live = 0xFF;
434                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
435                 item.spec = &ipv4;
436                 item.mask = &ipv4_mask;
437                 field = modify_ipv4;
438         }
439         if (attr->ipv6) {
440                 memset(&ipv6, 0, sizeof(ipv6));
441                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
442                 ipv6.hdr.hop_limits = conf->ttl_value;
443                 ipv6_mask.hdr.hop_limits = 0xFF;
444                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
445                 item.spec = &ipv6;
446                 item.mask = &ipv6_mask;
447                 field = modify_ipv6;
448         }
449         return flow_dv_convert_modify_action(&item, field, resource,
450                                              MLX5_MODIFICATION_TYPE_SET, error);
451 }
452
453 /**
454  * Convert modify-header decrement TTL action to DV specification.
455  *
456  * @param[in,out] resource
457  *   Pointer to the modify-header resource.
458  * @param[in] action
459  *   Pointer to action specification.
460  * @param[in] items
461  *   Pointer to rte_flow_item objects list.
462  * @param[in] attr
463  *   Pointer to flow attributes structure.
464  * @param[out] error
465  *   Pointer to the error structure.
466  *
467  * @return
468  *   0 on success, a negative errno value otherwise and rte_errno is set.
469  */
470 static int
471 flow_dv_convert_action_modify_dec_ttl
472                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
473                          const struct rte_flow_item *items,
474                          union flow_dv_attr *attr,
475                          struct rte_flow_error *error)
476 {
477         struct rte_flow_item item;
478         struct rte_flow_item_ipv4 ipv4;
479         struct rte_flow_item_ipv4 ipv4_mask;
480         struct rte_flow_item_ipv6 ipv6;
481         struct rte_flow_item_ipv6 ipv6_mask;
482         struct field_modify_info *field;
483
484         if (!attr->valid)
485                 flow_dv_attr_init(items, attr);
486         if (attr->ipv4) {
487                 memset(&ipv4, 0, sizeof(ipv4));
488                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
489                 ipv4.hdr.time_to_live = 0xFF;
490                 ipv4_mask.hdr.time_to_live = 0xFF;
491                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
492                 item.spec = &ipv4;
493                 item.mask = &ipv4_mask;
494                 field = modify_ipv4;
495         }
496         if (attr->ipv6) {
497                 memset(&ipv6, 0, sizeof(ipv6));
498                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
499                 ipv6.hdr.hop_limits = 0xFF;
500                 ipv6_mask.hdr.hop_limits = 0xFF;
501                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
502                 item.spec = &ipv6;
503                 item.mask = &ipv6_mask;
504                 field = modify_ipv6;
505         }
506         return flow_dv_convert_modify_action(&item, field, resource,
507                                              MLX5_MODIFICATION_TYPE_ADD, error);
508 }
509
/**
 * Validate META item.
 *
 * Checks that metadata matching is enabled on the port, that the item
 * carries a non-zero value, that the mask is acceptable, and that the
 * flow is not an ingress flow (META is egress-only here).
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	/* Widest mask this implementation accepts: all 32 data bits. */
	const struct rte_flow_item_meta nic_mask = {
		.data = RTE_BE32(UINT32_MAX)
	};
	int ret;
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;

	/* Matching on metadata requires the Tx offload to be enabled. */
	if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
		return rte_flow_error_set(error, EPERM,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on metadata offload "
					  "configuration is off for this port");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	/* Zero metadata is rejected as it cannot be matched. */
	if (!spec->data)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  NULL,
					  "data cannot be zero");
	/* Fall back to the default META mask when none is supplied. */
	if (!mask)
		mask = &rte_flow_item_meta_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
					error);
	if (ret < 0)
		return ret;
	/* META describes Tx metadata, hence egress-only. */
	if (attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "pattern not supported for ingress");
	return 0;
}
570
571 /**
572  * Validate the L2 encap action.
573  *
574  * @param[in] action_flags
575  *   Holds the actions detected until now.
576  * @param[in] action
577  *   Pointer to the encap action.
578  * @param[in] attr
579  *   Pointer to flow attributes
580  * @param[out] error
581  *   Pointer to error structure.
582  *
583  * @return
584  *   0 on success, a negative errno value otherwise and rte_errno is set.
585  */
586 static int
587 flow_dv_validate_action_l2_encap(uint64_t action_flags,
588                                  const struct rte_flow_action *action,
589                                  const struct rte_flow_attr *attr,
590                                  struct rte_flow_error *error)
591 {
592         if (!(action->conf))
593                 return rte_flow_error_set(error, EINVAL,
594                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
595                                           "configuration cannot be null");
596         if (action_flags & MLX5_FLOW_ACTION_DROP)
597                 return rte_flow_error_set(error, EINVAL,
598                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
599                                           "can't drop and encap in same flow");
600         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
601                 return rte_flow_error_set(error, EINVAL,
602                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
603                                           "can only have a single encap or"
604                                           " decap action in a flow");
605         if (attr->ingress)
606                 return rte_flow_error_set(error, ENOTSUP,
607                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
608                                           NULL,
609                                           "encap action not supported for "
610                                           "ingress");
611         return 0;
612 }
613
614 /**
615  * Validate the L2 decap action.
616  *
617  * @param[in] action_flags
618  *   Holds the actions detected until now.
619  * @param[in] attr
620  *   Pointer to flow attributes
621  * @param[out] error
622  *   Pointer to error structure.
623  *
624  * @return
625  *   0 on success, a negative errno value otherwise and rte_errno is set.
626  */
627 static int
628 flow_dv_validate_action_l2_decap(uint64_t action_flags,
629                                  const struct rte_flow_attr *attr,
630                                  struct rte_flow_error *error)
631 {
632         if (action_flags & MLX5_FLOW_ACTION_DROP)
633                 return rte_flow_error_set(error, EINVAL,
634                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
635                                           "can't drop and decap in same flow");
636         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
637                 return rte_flow_error_set(error, EINVAL,
638                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
639                                           "can only have a single encap or"
640                                           " decap action in a flow");
641         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
642                 return rte_flow_error_set(error, EINVAL,
643                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
644                                           "can't have decap action after"
645                                           " modify action");
646         if (attr->egress)
647                 return rte_flow_error_set(error, ENOTSUP,
648                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
649                                           NULL,
650                                           "decap action not supported for "
651                                           "egress");
652         return 0;
653 }
654
655 /**
656  * Validate the raw encap action.
657  *
658  * @param[in] action_flags
659  *   Holds the actions detected until now.
660  * @param[in] action
661  *   Pointer to the encap action.
662  * @param[in] attr
663  *   Pointer to flow attributes
664  * @param[out] error
665  *   Pointer to error structure.
666  *
667  * @return
668  *   0 on success, a negative errno value otherwise and rte_errno is set.
669  */
670 static int
671 flow_dv_validate_action_raw_encap(uint64_t action_flags,
672                                   const struct rte_flow_action *action,
673                                   const struct rte_flow_attr *attr,
674                                   struct rte_flow_error *error)
675 {
676         if (!(action->conf))
677                 return rte_flow_error_set(error, EINVAL,
678                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
679                                           "configuration cannot be null");
680         if (action_flags & MLX5_FLOW_ACTION_DROP)
681                 return rte_flow_error_set(error, EINVAL,
682                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
683                                           "can't drop and encap in same flow");
684         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
685                 return rte_flow_error_set(error, EINVAL,
686                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
687                                           "can only have a single encap"
688                                           " action in a flow");
689         /* encap without preceding decap is not supported for ingress */
690         if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
691                 return rte_flow_error_set(error, ENOTSUP,
692                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
693                                           NULL,
694                                           "encap action not supported for "
695                                           "ingress");
696         return 0;
697 }
698
699 /**
700  * Validate the raw decap action.
701  *
702  * @param[in] action_flags
703  *   Holds the actions detected until now.
704  * @param[in] action
705  *   Pointer to the encap action.
706  * @param[in] attr
707  *   Pointer to flow attributes
708  * @param[out] error
709  *   Pointer to error structure.
710  *
711  * @return
712  *   0 on success, a negative errno value otherwise and rte_errno is set.
713  */
714 static int
715 flow_dv_validate_action_raw_decap(uint64_t action_flags,
716                                   const struct rte_flow_action *action,
717                                   const struct rte_flow_attr *attr,
718                                   struct rte_flow_error *error)
719 {
720         if (action_flags & MLX5_FLOW_ACTION_DROP)
721                 return rte_flow_error_set(error, EINVAL,
722                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
723                                           "can't drop and decap in same flow");
724         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
725                 return rte_flow_error_set(error, EINVAL,
726                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
727                                           "can't have encap action before"
728                                           " decap action");
729         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
730                 return rte_flow_error_set(error, EINVAL,
731                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
732                                           "can only have a single decap"
733                                           " action in a flow");
734         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
735                 return rte_flow_error_set(error, EINVAL,
736                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
737                                           "can't have decap action after"
738                                           " modify action");
739         /* decap action is valid on egress only if it is followed by encap */
740         if (attr->egress) {
741                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
742                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
743                        action++) {
744                 }
745                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
746                         return rte_flow_error_set
747                                         (error, ENOTSUP,
748                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
749                                          NULL, "decap action not supported"
750                                          " for egress");
751         }
752         return 0;
753 }
754
755 /**
756  * Find existing encap/decap resource or create and register a new one.
757  *
758  * @param dev[in, out]
759  *   Pointer to rte_eth_dev structure.
760  * @param[in, out] resource
761  *   Pointer to encap/decap resource.
762  * @parm[in, out] dev_flow
763  *   Pointer to the dev_flow.
764  * @param[out] error
765  *   pointer to error structure.
766  *
767  * @return
768  *   0 on success otherwise -errno and errno is set.
769  */
770 static int
771 flow_dv_encap_decap_resource_register
772                         (struct rte_eth_dev *dev,
773                          struct mlx5_flow_dv_encap_decap_resource *resource,
774                          struct mlx5_flow *dev_flow,
775                          struct rte_flow_error *error)
776 {
777         struct priv *priv = dev->data->dev_private;
778         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
779
780         /* Lookup a matching resource from cache. */
781         LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
782                 if (resource->reformat_type == cache_resource->reformat_type &&
783                     resource->ft_type == cache_resource->ft_type &&
784                     resource->size == cache_resource->size &&
785                     !memcmp((const void *)resource->buf,
786                             (const void *)cache_resource->buf,
787                             resource->size)) {
788                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
789                                 (void *)cache_resource,
790                                 rte_atomic32_read(&cache_resource->refcnt));
791                         rte_atomic32_inc(&cache_resource->refcnt);
792                         dev_flow->dv.encap_decap = cache_resource;
793                         return 0;
794                 }
795         }
796         /* Register new encap/decap resource. */
797         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
798         if (!cache_resource)
799                 return rte_flow_error_set(error, ENOMEM,
800                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
801                                           "cannot allocate resource memory");
802         *cache_resource = *resource;
803         cache_resource->verbs_action =
804                 mlx5_glue->dv_create_flow_action_packet_reformat
805                         (priv->ctx, cache_resource->size,
806                          (cache_resource->size ? cache_resource->buf : NULL),
807                          cache_resource->reformat_type,
808                          cache_resource->ft_type);
809         if (!cache_resource->verbs_action) {
810                 rte_free(cache_resource);
811                 return rte_flow_error_set(error, ENOMEM,
812                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
813                                           NULL, "cannot create action");
814         }
815         rte_atomic32_init(&cache_resource->refcnt);
816         rte_atomic32_inc(&cache_resource->refcnt);
817         LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
818         dev_flow->dv.encap_decap = cache_resource;
819         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
820                 (void *)cache_resource,
821                 rte_atomic32_read(&cache_resource->refcnt));
822         return 0;
823 }
824
825 /**
826  * Get the size of specific rte_flow_item_type
827  *
828  * @param[in] item_type
829  *   Tested rte_flow_item_type.
830  *
831  * @return
832  *   sizeof struct item_type, 0 if void or irrelevant.
833  */
834 static size_t
835 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
836 {
837         size_t retval;
838
839         switch (item_type) {
840         case RTE_FLOW_ITEM_TYPE_ETH:
841                 retval = sizeof(struct rte_flow_item_eth);
842                 break;
843         case RTE_FLOW_ITEM_TYPE_VLAN:
844                 retval = sizeof(struct rte_flow_item_vlan);
845                 break;
846         case RTE_FLOW_ITEM_TYPE_IPV4:
847                 retval = sizeof(struct rte_flow_item_ipv4);
848                 break;
849         case RTE_FLOW_ITEM_TYPE_IPV6:
850                 retval = sizeof(struct rte_flow_item_ipv6);
851                 break;
852         case RTE_FLOW_ITEM_TYPE_UDP:
853                 retval = sizeof(struct rte_flow_item_udp);
854                 break;
855         case RTE_FLOW_ITEM_TYPE_TCP:
856                 retval = sizeof(struct rte_flow_item_tcp);
857                 break;
858         case RTE_FLOW_ITEM_TYPE_VXLAN:
859                 retval = sizeof(struct rte_flow_item_vxlan);
860                 break;
861         case RTE_FLOW_ITEM_TYPE_GRE:
862                 retval = sizeof(struct rte_flow_item_gre);
863                 break;
864         case RTE_FLOW_ITEM_TYPE_NVGRE:
865                 retval = sizeof(struct rte_flow_item_nvgre);
866                 break;
867         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
868                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
869                 break;
870         case RTE_FLOW_ITEM_TYPE_MPLS:
871                 retval = sizeof(struct rte_flow_item_mpls);
872                 break;
873         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
874         default:
875                 retval = 0;
876                 break;
877         }
878         return retval;
879 }
880
881 #define MLX5_ENCAP_IPV4_VERSION         0x40
882 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
883 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
884 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
885 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
886 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
887 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
888
889 /**
890  * Convert the encap action data from list of rte_flow_item to raw buffer
891  *
892  * @param[in] items
893  *   Pointer to rte_flow_item objects list.
894  * @param[out] buf
895  *   Pointer to the output buffer.
896  * @param[out] size
897  *   Pointer to the output buffer size.
898  * @param[out] error
899  *   Pointer to the error structure.
900  *
901  * @return
902  *   0 on success, a negative errno value otherwise and rte_errno is set.
903  */
904 static int
905 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
906                            size_t *size, struct rte_flow_error *error)
907 {
908         struct ether_hdr *eth = NULL;
909         struct vlan_hdr *vlan = NULL;
910         struct ipv4_hdr *ipv4 = NULL;
911         struct ipv6_hdr *ipv6 = NULL;
912         struct udp_hdr *udp = NULL;
913         struct vxlan_hdr *vxlan = NULL;
914         struct vxlan_gpe_hdr *vxlan_gpe = NULL;
915         struct gre_hdr *gre = NULL;
916         size_t len;
917         size_t temp_size = 0;
918
919         if (!items)
920                 return rte_flow_error_set(error, EINVAL,
921                                           RTE_FLOW_ERROR_TYPE_ACTION,
922                                           NULL, "invalid empty data");
923         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
924                 len = flow_dv_get_item_len(items->type);
925                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
926                         return rte_flow_error_set(error, EINVAL,
927                                                   RTE_FLOW_ERROR_TYPE_ACTION,
928                                                   (void *)items->type,
929                                                   "items total size is too big"
930                                                   " for encap action");
931                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
932                 switch (items->type) {
933                 case RTE_FLOW_ITEM_TYPE_ETH:
934                         eth = (struct ether_hdr *)&buf[temp_size];
935                         break;
936                 case RTE_FLOW_ITEM_TYPE_VLAN:
937                         vlan = (struct vlan_hdr *)&buf[temp_size];
938                         if (!eth)
939                                 return rte_flow_error_set(error, EINVAL,
940                                                 RTE_FLOW_ERROR_TYPE_ACTION,
941                                                 (void *)items->type,
942                                                 "eth header not found");
943                         if (!eth->ether_type)
944                                 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
945                         break;
946                 case RTE_FLOW_ITEM_TYPE_IPV4:
947                         ipv4 = (struct ipv4_hdr *)&buf[temp_size];
948                         if (!vlan && !eth)
949                                 return rte_flow_error_set(error, EINVAL,
950                                                 RTE_FLOW_ERROR_TYPE_ACTION,
951                                                 (void *)items->type,
952                                                 "neither eth nor vlan"
953                                                 " header found");
954                         if (vlan && !vlan->eth_proto)
955                                 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
956                         else if (eth && !eth->ether_type)
957                                 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
958                         if (!ipv4->version_ihl)
959                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
960                                                     MLX5_ENCAP_IPV4_IHL_MIN;
961                         if (!ipv4->time_to_live)
962                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
963                         break;
964                 case RTE_FLOW_ITEM_TYPE_IPV6:
965                         ipv6 = (struct ipv6_hdr *)&buf[temp_size];
966                         if (!vlan && !eth)
967                                 return rte_flow_error_set(error, EINVAL,
968                                                 RTE_FLOW_ERROR_TYPE_ACTION,
969                                                 (void *)items->type,
970                                                 "neither eth nor vlan"
971                                                 " header found");
972                         if (vlan && !vlan->eth_proto)
973                                 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
974                         else if (eth && !eth->ether_type)
975                                 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
976                         if (!ipv6->vtc_flow)
977                                 ipv6->vtc_flow =
978                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
979                         if (!ipv6->hop_limits)
980                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
981                         break;
982                 case RTE_FLOW_ITEM_TYPE_UDP:
983                         udp = (struct udp_hdr *)&buf[temp_size];
984                         if (!ipv4 && !ipv6)
985                                 return rte_flow_error_set(error, EINVAL,
986                                                 RTE_FLOW_ERROR_TYPE_ACTION,
987                                                 (void *)items->type,
988                                                 "ip header not found");
989                         if (ipv4 && !ipv4->next_proto_id)
990                                 ipv4->next_proto_id = IPPROTO_UDP;
991                         else if (ipv6 && !ipv6->proto)
992                                 ipv6->proto = IPPROTO_UDP;
993                         break;
994                 case RTE_FLOW_ITEM_TYPE_VXLAN:
995                         vxlan = (struct vxlan_hdr *)&buf[temp_size];
996                         if (!udp)
997                                 return rte_flow_error_set(error, EINVAL,
998                                                 RTE_FLOW_ERROR_TYPE_ACTION,
999                                                 (void *)items->type,
1000                                                 "udp header not found");
1001                         if (!udp->dst_port)
1002                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1003                         if (!vxlan->vx_flags)
1004                                 vxlan->vx_flags =
1005                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1006                         break;
1007                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1008                         vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
1009                         if (!udp)
1010                                 return rte_flow_error_set(error, EINVAL,
1011                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1012                                                 (void *)items->type,
1013                                                 "udp header not found");
1014                         if (!vxlan_gpe->proto)
1015                                 return rte_flow_error_set(error, EINVAL,
1016                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1017                                                 (void *)items->type,
1018                                                 "next protocol not found");
1019                         if (!udp->dst_port)
1020                                 udp->dst_port =
1021                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1022                         if (!vxlan_gpe->vx_flags)
1023                                 vxlan_gpe->vx_flags =
1024                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1025                         break;
1026                 case RTE_FLOW_ITEM_TYPE_GRE:
1027                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1028                         gre = (struct gre_hdr *)&buf[temp_size];
1029                         if (!gre->proto)
1030                                 return rte_flow_error_set(error, EINVAL,
1031                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1032                                                 (void *)items->type,
1033                                                 "next protocol not found");
1034                         if (!ipv4 && !ipv6)
1035                                 return rte_flow_error_set(error, EINVAL,
1036                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1037                                                 (void *)items->type,
1038                                                 "ip header not found");
1039                         if (ipv4 && !ipv4->next_proto_id)
1040                                 ipv4->next_proto_id = IPPROTO_GRE;
1041                         else if (ipv6 && !ipv6->proto)
1042                                 ipv6->proto = IPPROTO_GRE;
1043                         break;
1044                 case RTE_FLOW_ITEM_TYPE_VOID:
1045                         break;
1046                 default:
1047                         return rte_flow_error_set(error, EINVAL,
1048                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1049                                                   (void *)items->type,
1050                                                   "unsupported item type");
1051                         break;
1052                 }
1053                 temp_size += len;
1054         }
1055         *size = temp_size;
1056         return 0;
1057 }
1058
1059 /**
1060  * Convert L2 encap action to DV specification.
1061  *
1062  * @param[in] dev
1063  *   Pointer to rte_eth_dev structure.
1064  * @param[in] action
1065  *   Pointer to action structure.
1066  * @param[in, out] dev_flow
1067  *   Pointer to the mlx5_flow.
1068  * @param[out] error
1069  *   Pointer to the error structure.
1070  *
1071  * @return
1072  *   0 on success, a negative errno value otherwise and rte_errno is set.
1073  */
1074 static int
1075 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1076                                const struct rte_flow_action *action,
1077                                struct mlx5_flow *dev_flow,
1078                                struct rte_flow_error *error)
1079 {
1080         const struct rte_flow_item *encap_data;
1081         const struct rte_flow_action_raw_encap *raw_encap_data;
1082         struct mlx5_flow_dv_encap_decap_resource res = {
1083                 .reformat_type =
1084                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1085                 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1086         };
1087
1088         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1089                 raw_encap_data =
1090                         (const struct rte_flow_action_raw_encap *)action->conf;
1091                 res.size = raw_encap_data->size;
1092                 memcpy(res.buf, raw_encap_data->data, res.size);
1093         } else {
1094                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1095                         encap_data =
1096                                 ((const struct rte_flow_action_vxlan_encap *)
1097                                                 action->conf)->definition;
1098                 else
1099                         encap_data =
1100                                 ((const struct rte_flow_action_nvgre_encap *)
1101                                                 action->conf)->definition;
1102                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1103                                                &res.size, error))
1104                         return -rte_errno;
1105         }
1106         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1107                 return rte_flow_error_set(error, EINVAL,
1108                                           RTE_FLOW_ERROR_TYPE_ACTION,
1109                                           NULL, "can't create L2 encap action");
1110         return 0;
1111 }
1112
1113 /**
1114  * Convert L2 decap action to DV specification.
1115  *
1116  * @param[in] dev
1117  *   Pointer to rte_eth_dev structure.
1118  * @param[in, out] dev_flow
1119  *   Pointer to the mlx5_flow.
1120  * @param[out] error
1121  *   Pointer to the error structure.
1122  *
1123  * @return
1124  *   0 on success, a negative errno value otherwise and rte_errno is set.
1125  */
1126 static int
1127 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1128                                struct mlx5_flow *dev_flow,
1129                                struct rte_flow_error *error)
1130 {
1131         struct mlx5_flow_dv_encap_decap_resource res = {
1132                 .size = 0,
1133                 .reformat_type =
1134                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1135                 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1136         };
1137
1138         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1139                 return rte_flow_error_set(error, EINVAL,
1140                                           RTE_FLOW_ERROR_TYPE_ACTION,
1141                                           NULL, "can't create L2 decap action");
1142         return 0;
1143 }
1144
1145 /**
1146  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1147  *
1148  * @param[in] dev
1149  *   Pointer to rte_eth_dev structure.
1150  * @param[in] action
1151  *   Pointer to action structure.
1152  * @param[in, out] dev_flow
1153  *   Pointer to the mlx5_flow.
1154  * @param[in] attr
1155  *   Pointer to the flow attributes.
1156  * @param[out] error
1157  *   Pointer to the error structure.
1158  *
1159  * @return
1160  *   0 on success, a negative errno value otherwise and rte_errno is set.
1161  */
1162 static int
1163 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1164                                 const struct rte_flow_action *action,
1165                                 struct mlx5_flow *dev_flow,
1166                                 const struct rte_flow_attr *attr,
1167                                 struct rte_flow_error *error)
1168 {
1169         const struct rte_flow_action_raw_encap *encap_data;
1170         struct mlx5_flow_dv_encap_decap_resource res;
1171
1172         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1173         res.size = encap_data->size;
1174         memcpy(res.buf, encap_data->data, res.size);
1175         res.reformat_type = attr->egress ?
1176                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1177                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1178         res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1179                                      MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1180         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1181                 return rte_flow_error_set(error, EINVAL,
1182                                           RTE_FLOW_ERROR_TYPE_ACTION,
1183                                           NULL, "can't create encap action");
1184         return 0;
1185 }
1186
1187 /**
1188  * Validate the modify-header actions.
1189  *
1190  * @param[in] action_flags
1191  *   Holds the actions detected until now.
1192  * @param[in] action
1193  *   Pointer to the modify action.
1194  * @param[out] error
1195  *   Pointer to error structure.
1196  *
1197  * @return
1198  *   0 on success, a negative errno value otherwise and rte_errno is set.
1199  */
1200 static int
1201 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1202                                    const struct rte_flow_action *action,
1203                                    struct rte_flow_error *error)
1204 {
1205         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1206                 return rte_flow_error_set(error, EINVAL,
1207                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1208                                           NULL, "action configuration not set");
1209         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1210                 return rte_flow_error_set(error, EINVAL,
1211                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1212                                           "can't have encap action before"
1213                                           " modify action");
1214         return 0;
1215 }
1216
1217 /**
1218  * Validate the modify-header MAC address actions.
1219  *
1220  * @param[in] action_flags
1221  *   Holds the actions detected until now.
1222  * @param[in] action
1223  *   Pointer to the modify action.
1224  * @param[in] item_flags
1225  *   Holds the items detected.
1226  * @param[out] error
1227  *   Pointer to error structure.
1228  *
1229  * @return
1230  *   0 on success, a negative errno value otherwise and rte_errno is set.
1231  */
1232 static int
1233 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1234                                    const struct rte_flow_action *action,
1235                                    const uint64_t item_flags,
1236                                    struct rte_flow_error *error)
1237 {
1238         int ret = 0;
1239
1240         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1241         if (!ret) {
1242                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1243                         return rte_flow_error_set(error, EINVAL,
1244                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1245                                                   NULL,
1246                                                   "no L2 item in pattern");
1247         }
1248         return ret;
1249 }
1250
1251 /**
1252  * Validate the modify-header IPv4 address actions.
1253  *
1254  * @param[in] action_flags
1255  *   Holds the actions detected until now.
1256  * @param[in] action
1257  *   Pointer to the modify action.
1258  * @param[in] item_flags
1259  *   Holds the items detected.
1260  * @param[out] error
1261  *   Pointer to error structure.
1262  *
1263  * @return
1264  *   0 on success, a negative errno value otherwise and rte_errno is set.
1265  */
1266 static int
1267 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1268                                     const struct rte_flow_action *action,
1269                                     const uint64_t item_flags,
1270                                     struct rte_flow_error *error)
1271 {
1272         int ret = 0;
1273
1274         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1275         if (!ret) {
1276                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1277                         return rte_flow_error_set(error, EINVAL,
1278                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1279                                                   NULL,
1280                                                   "no ipv4 item in pattern");
1281         }
1282         return ret;
1283 }
1284
1285 /**
1286  * Validate the modify-header IPv6 address actions.
1287  *
1288  * @param[in] action_flags
1289  *   Holds the actions detected until now.
1290  * @param[in] action
1291  *   Pointer to the modify action.
1292  * @param[in] item_flags
1293  *   Holds the items detected.
1294  * @param[out] error
1295  *   Pointer to error structure.
1296  *
1297  * @return
1298  *   0 on success, a negative errno value otherwise and rte_errno is set.
1299  */
1300 static int
1301 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1302                                     const struct rte_flow_action *action,
1303                                     const uint64_t item_flags,
1304                                     struct rte_flow_error *error)
1305 {
1306         int ret = 0;
1307
1308         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1309         if (!ret) {
1310                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1311                         return rte_flow_error_set(error, EINVAL,
1312                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1313                                                   NULL,
1314                                                   "no ipv6 item in pattern");
1315         }
1316         return ret;
1317 }
1318
1319 /**
1320  * Validate the modify-header TP actions.
1321  *
1322  * @param[in] action_flags
1323  *   Holds the actions detected until now.
1324  * @param[in] action
1325  *   Pointer to the modify action.
1326  * @param[in] item_flags
1327  *   Holds the items detected.
1328  * @param[out] error
1329  *   Pointer to error structure.
1330  *
1331  * @return
1332  *   0 on success, a negative errno value otherwise and rte_errno is set.
1333  */
1334 static int
1335 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1336                                   const struct rte_flow_action *action,
1337                                   const uint64_t item_flags,
1338                                   struct rte_flow_error *error)
1339 {
1340         int ret = 0;
1341
1342         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1343         if (!ret) {
1344                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1345                         return rte_flow_error_set(error, EINVAL,
1346                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1347                                                   NULL, "no transport layer "
1348                                                   "in pattern");
1349         }
1350         return ret;
1351 }
1352
1353 /**
1354  * Validate the modify-header TTL actions.
1355  *
1356  * @param[in] action_flags
1357  *   Holds the actions detected until now.
1358  * @param[in] action
1359  *   Pointer to the modify action.
1360  * @param[in] item_flags
1361  *   Holds the items detected.
1362  * @param[out] error
1363  *   Pointer to error structure.
1364  *
1365  * @return
1366  *   0 on success, a negative errno value otherwise and rte_errno is set.
1367  */
1368 static int
1369 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1370                                    const struct rte_flow_action *action,
1371                                    const uint64_t item_flags,
1372                                    struct rte_flow_error *error)
1373 {
1374         int ret = 0;
1375
1376         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1377         if (!ret) {
1378                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1379                         return rte_flow_error_set(error, EINVAL,
1380                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1381                                                   NULL,
1382                                                   "no IP protocol in pattern");
1383         }
1384         return ret;
1385 }
1386
1387 /**
1388  * Find existing modify-header resource or create and register a new one.
1389  *
1390  * @param dev[in, out]
1391  *   Pointer to rte_eth_dev structure.
1392  * @param[in, out] resource
1393  *   Pointer to modify-header resource.
1394  * @parm[in, out] dev_flow
1395  *   Pointer to the dev_flow.
1396  * @param[out] error
1397  *   pointer to error structure.
1398  *
1399  * @return
1400  *   0 on success otherwise -errno and errno is set.
1401  */
1402 static int
1403 flow_dv_modify_hdr_resource_register
1404                         (struct rte_eth_dev *dev,
1405                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1406                          struct mlx5_flow *dev_flow,
1407                          struct rte_flow_error *error)
1408 {
1409         struct priv *priv = dev->data->dev_private;
1410         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
1411
1412         /* Lookup a matching resource from cache. */
1413         LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
1414                 if (resource->ft_type == cache_resource->ft_type &&
1415                     resource->actions_num == cache_resource->actions_num &&
1416                     !memcmp((const void *)resource->actions,
1417                             (const void *)cache_resource->actions,
1418                             (resource->actions_num *
1419                                             sizeof(resource->actions[0])))) {
1420                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1421                                 (void *)cache_resource,
1422                                 rte_atomic32_read(&cache_resource->refcnt));
1423                         rte_atomic32_inc(&cache_resource->refcnt);
1424                         dev_flow->dv.modify_hdr = cache_resource;
1425                         return 0;
1426                 }
1427         }
1428         /* Register new modify-header resource. */
1429         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1430         if (!cache_resource)
1431                 return rte_flow_error_set(error, ENOMEM,
1432                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1433                                           "cannot allocate resource memory");
1434         *cache_resource = *resource;
1435         cache_resource->verbs_action =
1436                 mlx5_glue->dv_create_flow_action_modify_header
1437                                         (priv->ctx,
1438                                          cache_resource->actions_num *
1439                                          sizeof(cache_resource->actions[0]),
1440                                          (uint64_t *)cache_resource->actions,
1441                                          cache_resource->ft_type);
1442         if (!cache_resource->verbs_action) {
1443                 rte_free(cache_resource);
1444                 return rte_flow_error_set(error, ENOMEM,
1445                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1446                                           NULL, "cannot create action");
1447         }
1448         rte_atomic32_init(&cache_resource->refcnt);
1449         rte_atomic32_inc(&cache_resource->refcnt);
1450         LIST_INSERT_HEAD(&priv->modify_cmds, cache_resource, next);
1451         dev_flow->dv.modify_hdr = cache_resource;
1452         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1453                 (void *)cache_resource,
1454                 rte_atomic32_read(&cache_resource->refcnt));
1455         return 0;
1456 }
1457
1458 /**
1459  * Verify the @p attributes will be correctly understood by the NIC and store
1460  * them in the @p flow if everything is correct.
1461  *
1462  * @param[in] dev
1463  *   Pointer to dev struct.
1464  * @param[in] attributes
1465  *   Pointer to flow attributes
1466  * @param[out] error
1467  *   Pointer to error structure.
1468  *
1469  * @return
1470  *   0 on success, a negative errno value otherwise and rte_errno is set.
1471  */
1472 static int
1473 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1474                             const struct rte_flow_attr *attributes,
1475                             struct rte_flow_error *error)
1476 {
1477         struct priv *priv = dev->data->dev_private;
1478         uint32_t priority_max = priv->config.flow_prio - 1;
1479
1480         if (attributes->group)
1481                 return rte_flow_error_set(error, ENOTSUP,
1482                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1483                                           NULL,
1484                                           "groups is not supported");
1485         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1486             attributes->priority >= priority_max)
1487                 return rte_flow_error_set(error, ENOTSUP,
1488                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1489                                           NULL,
1490                                           "priority out of range");
1491         if (attributes->transfer)
1492                 return rte_flow_error_set(error, ENOTSUP,
1493                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1494                                           NULL,
1495                                           "transfer is not supported");
1496         if (!(attributes->egress ^ attributes->ingress))
1497                 return rte_flow_error_set(error, ENOTSUP,
1498                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1499                                           "must specify exactly one of "
1500                                           "ingress or egress");
1501         return 0;
1502 }
1503
1504 /**
1505  * Internal validation function. For validating both actions and items.
1506  *
1507  * @param[in] dev
1508  *   Pointer to the rte_eth_dev structure.
1509  * @param[in] attr
1510  *   Pointer to the flow attributes.
1511  * @param[in] items
1512  *   Pointer to the list of items.
1513  * @param[in] actions
1514  *   Pointer to the list of actions.
1515  * @param[out] error
1516  *   Pointer to the error structure.
1517  *
1518  * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
1520  */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	int tunnel = 0;
	/* L4 protocol expected by the previous L3 item; 0xff = any. */
	uint8_t next_protocol = 0xff;
	int actions_n = 0;

	if (items == NULL)
		return -1;
	/* Validate flow attributes (group/priority/transfer/direction). */
	ret = flow_dv_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	/*
	 * Pass 1: validate the pattern items. item_flags accumulates one
	 * MLX5_FLOW_LAYER_* bit per matched layer; a set tunnel bit makes
	 * subsequent L2-L4 items be treated as inner headers.
	 */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					     MLX5_FLOW_LAYER_OUTER_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			/*
			 * Remember the masked next protocol so the following
			 * L4/tunnel item can be checked against it.
			 */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			/* NVGRE shares the GRE validation path. */
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags, dev,
								error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		/* Commit the layer bit only after the item validated OK. */
		item_flags |= last_item;
	}
	/*
	 * Pass 2: validate the actions. actions_n counts device actions
	 * (all modify-header actions together count as one) and is bounded
	 * by MLX5_DV_MAX_NUMBER_OF_ACTIONS.
	 */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(action_flags,
							       actions, attr,
							       error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
					MLX5_FLOW_ACTION_VXLAN_ENCAP :
					MLX5_FLOW_ACTION_NVGRE_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_l2_decap(action_flags,
							       attr, error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
					MLX5_FLOW_ACTION_VXLAN_DECAP :
					MLX5_FLOW_ACTION_NVGRE_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			ret = flow_dv_validate_action_raw_decap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
						MLX5_FLOW_ACTION_SET_MAC_SRC :
						MLX5_FLOW_ACTION_SET_MAC_DST;
			break;

		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
						MLX5_FLOW_ACTION_SET_IPV4_SRC :
						MLX5_FLOW_ACTION_SET_IPV4_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
						MLX5_FLOW_ACTION_SET_IPV6_SRC :
						MLX5_FLOW_ACTION_SET_IPV6_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
								actions,
								item_flags,
								error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
						MLX5_FLOW_ACTION_SET_TP_SRC :
						MLX5_FLOW_ACTION_SET_TP_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			ret = flow_dv_validate_action_modify_ttl(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TTL ?
						MLX5_FLOW_ACTION_SET_TTL :
						MLX5_FLOW_ACTION_DEC_TTL;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/* An ingress flow must end with a fate action (queue/rss/drop/...). */
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}
1863
1864 /**
1865  * Internal preparation function. Allocates the DV flow size,
1866  * this size is constant.
1867  *
1868  * @param[in] attr
1869  *   Pointer to the flow attributes.
1870  * @param[in] items
1871  *   Pointer to the list of items.
1872  * @param[in] actions
1873  *   Pointer to the list of actions.
1874  * @param[out] error
1875  *   Pointer to the error structure.
1876  *
1877  * @return
1878  *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
1880  */
1881 static struct mlx5_flow *
1882 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
1883                 const struct rte_flow_item items[] __rte_unused,
1884                 const struct rte_flow_action actions[] __rte_unused,
1885                 struct rte_flow_error *error)
1886 {
1887         uint32_t size = sizeof(struct mlx5_flow);
1888         struct mlx5_flow *flow;
1889
1890         flow = rte_calloc(__func__, 1, size, 0);
1891         if (!flow) {
1892                 rte_flow_error_set(error, ENOMEM,
1893                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1894                                    "not enough memory to create flow");
1895                 return NULL;
1896         }
1897         flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
1898         return flow;
1899 }
1900
1901 #ifndef NDEBUG
1902 /**
1903  * Sanity check for match mask and value. Similar to check_valid_spec() in
1904  * kernel driver. If unmasked bit is present in value, it returns failure.
1905  *
1906  * @param match_mask
1907  *   pointer to match mask buffer.
1908  * @param match_value
1909  *   pointer to match value buffer.
1910  *
1911  * @return
1912  *   0 if valid, -EINVAL otherwise.
1913  */
1914 static int
1915 flow_dv_check_valid_spec(void *match_mask, void *match_value)
1916 {
1917         uint8_t *m = match_mask;
1918         uint8_t *v = match_value;
1919         unsigned int i;
1920
1921         for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
1922                 if (v[i] & ~m[i]) {
1923                         DRV_LOG(ERR,
1924                                 "match_value differs from match_criteria"
1925                                 " %p[%u] != %p[%u]",
1926                                 match_value, i, match_mask, i);
1927                         return -EINVAL;
1928                 }
1929         }
1930         return 0;
1931 }
1932 #endif
1933
1934 /**
1935  * Add Ethernet item to matcher and to the value.
1936  *
1937  * @param[in, out] matcher
1938  *   Flow matcher.
1939  * @param[in, out] key
1940  *   Flow matcher value.
1941  * @param[in] item
1942  *   Flow pattern to translate.
1943  * @param[in] inner
1944  *   Item is inner pattern.
1945  */
1946 static void
1947 flow_dv_translate_item_eth(void *matcher, void *key,
1948                            const struct rte_flow_item *item, int inner)
1949 {
1950         const struct rte_flow_item_eth *eth_m = item->mask;
1951         const struct rte_flow_item_eth *eth_v = item->spec;
1952         const struct rte_flow_item_eth nic_mask = {
1953                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1954                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1955                 .type = RTE_BE16(0xffff),
1956         };
1957         void *headers_m;
1958         void *headers_v;
1959         char *l24_v;
1960         unsigned int i;
1961
1962         if (!eth_v)
1963                 return;
1964         if (!eth_m)
1965                 eth_m = &nic_mask;
1966         if (inner) {
1967                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1968                                          inner_headers);
1969                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1970         } else {
1971                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1972                                          outer_headers);
1973                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1974         }
1975         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
1976                &eth_m->dst, sizeof(eth_m->dst));
1977         /* The value must be in the range of the mask. */
1978         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
1979         for (i = 0; i < sizeof(eth_m->dst); ++i)
1980                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
1981         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
1982                &eth_m->src, sizeof(eth_m->src));
1983         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
1984         /* The value must be in the range of the mask. */
1985         for (i = 0; i < sizeof(eth_m->dst); ++i)
1986                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
1987         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
1988                  rte_be_to_cpu_16(eth_m->type));
1989         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
1990         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
1991 }
1992
1993 /**
1994  * Add VLAN item to matcher and to the value.
1995  *
1996  * @param[in, out] matcher
1997  *   Flow matcher.
1998  * @param[in, out] key
1999  *   Flow matcher value.
2000  * @param[in] item
2001  *   Flow pattern to translate.
2002  * @param[in] inner
2003  *   Item is inner pattern.
2004  */
2005 static void
2006 flow_dv_translate_item_vlan(void *matcher, void *key,
2007                             const struct rte_flow_item *item,
2008                             int inner)
2009 {
2010         const struct rte_flow_item_vlan *vlan_m = item->mask;
2011         const struct rte_flow_item_vlan *vlan_v = item->spec;
2012         const struct rte_flow_item_vlan nic_mask = {
2013                 .tci = RTE_BE16(0x0fff),
2014                 .inner_type = RTE_BE16(0xffff),
2015         };
2016         void *headers_m;
2017         void *headers_v;
2018         uint16_t tci_m;
2019         uint16_t tci_v;
2020
2021         if (!vlan_v)
2022                 return;
2023         if (!vlan_m)
2024                 vlan_m = &nic_mask;
2025         if (inner) {
2026                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2027                                          inner_headers);
2028                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2029         } else {
2030                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2031                                          outer_headers);
2032                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2033         }
2034         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2035         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2036         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2037         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2038         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2039         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2040         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2041         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2042         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2043         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2044 }
2045
2046 /**
2047  * Add IPV4 item to matcher and to the value.
2048  *
2049  * @param[in, out] matcher
2050  *   Flow matcher.
2051  * @param[in, out] key
2052  *   Flow matcher value.
2053  * @param[in] item
2054  *   Flow pattern to translate.
2055  * @param[in] inner
2056  *   Item is inner pattern.
2057  */
2058 static void
2059 flow_dv_translate_item_ipv4(void *matcher, void *key,
2060                             const struct rte_flow_item *item,
2061                             int inner)
2062 {
2063         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2064         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2065         const struct rte_flow_item_ipv4 nic_mask = {
2066                 .hdr = {
2067                         .src_addr = RTE_BE32(0xffffffff),
2068                         .dst_addr = RTE_BE32(0xffffffff),
2069                         .type_of_service = 0xff,
2070                         .next_proto_id = 0xff,
2071                 },
2072         };
2073         void *headers_m;
2074         void *headers_v;
2075         char *l24_m;
2076         char *l24_v;
2077         uint8_t tos;
2078
2079         if (inner) {
2080                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2081                                          inner_headers);
2082                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2083         } else {
2084                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2085                                          outer_headers);
2086                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2087         }
2088         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2089         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2090         if (!ipv4_v)
2091                 return;
2092         if (!ipv4_m)
2093                 ipv4_m = &nic_mask;
2094         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2095                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2096         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2097                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2098         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2099         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2100         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2101                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
2102         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2103                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
2104         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2105         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2106         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2107         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2108                  ipv4_m->hdr.type_of_service);
2109         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2110         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2111                  ipv4_m->hdr.type_of_service >> 2);
2112         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2113         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2114                  ipv4_m->hdr.next_proto_id);
2115         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2116                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2117 }
2118
2119 /**
2120  * Add IPV6 item to matcher and to the value.
2121  *
2122  * @param[in, out] matcher
2123  *   Flow matcher.
2124  * @param[in, out] key
2125  *   Flow matcher value.
2126  * @param[in] item
2127  *   Flow pattern to translate.
2128  * @param[in] inner
2129  *   Item is inner pattern.
2130  */
2131 static void
2132 flow_dv_translate_item_ipv6(void *matcher, void *key,
2133                             const struct rte_flow_item *item,
2134                             int inner)
2135 {
2136         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2137         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2138         const struct rte_flow_item_ipv6 nic_mask = {
2139                 .hdr = {
2140                         .src_addr =
2141                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2142                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2143                         .dst_addr =
2144                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2145                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2146                         .vtc_flow = RTE_BE32(0xffffffff),
2147                         .proto = 0xff,
2148                         .hop_limits = 0xff,
2149                 },
2150         };
2151         void *headers_m;
2152         void *headers_v;
2153         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2154         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2155         char *l24_m;
2156         char *l24_v;
2157         uint32_t vtc_m;
2158         uint32_t vtc_v;
2159         int i;
2160         int size;
2161
2162         if (inner) {
2163                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2164                                          inner_headers);
2165                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2166         } else {
2167                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2168                                          outer_headers);
2169                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2170         }
2171         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2172         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2173         if (!ipv6_v)
2174                 return;
2175         if (!ipv6_m)
2176                 ipv6_m = &nic_mask;
2177         size = sizeof(ipv6_m->hdr.dst_addr);
2178         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2179                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2180         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2181                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2182         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2183         for (i = 0; i < size; ++i)
2184                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2185         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2186                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
2187         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2188                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
2189         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2190         for (i = 0; i < size; ++i)
2191                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
2192         /* TOS. */
2193         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2194         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2195         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2196         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2197         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2198         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2199         /* Label. */
2200         if (inner) {
2201                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2202                          vtc_m);
2203                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2204                          vtc_v);
2205         } else {
2206                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2207                          vtc_m);
2208                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2209                          vtc_v);
2210         }
2211         /* Protocol. */
2212         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2213                  ipv6_m->hdr.proto);
2214         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2215                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2216 }
2217
2218 /**
2219  * Add TCP item to matcher and to the value.
2220  *
2221  * @param[in, out] matcher
2222  *   Flow matcher.
2223  * @param[in, out] key
2224  *   Flow matcher value.
2225  * @param[in] item
2226  *   Flow pattern to translate.
2227  * @param[in] inner
2228  *   Item is inner pattern.
2229  */
2230 static void
2231 flow_dv_translate_item_tcp(void *matcher, void *key,
2232                            const struct rte_flow_item *item,
2233                            int inner)
2234 {
2235         const struct rte_flow_item_tcp *tcp_m = item->mask;
2236         const struct rte_flow_item_tcp *tcp_v = item->spec;
2237         void *headers_m;
2238         void *headers_v;
2239
2240         if (inner) {
2241                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2242                                          inner_headers);
2243                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2244         } else {
2245                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2246                                          outer_headers);
2247                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2248         }
2249         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2250         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2251         if (!tcp_v)
2252                 return;
2253         if (!tcp_m)
2254                 tcp_m = &rte_flow_item_tcp_mask;
2255         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2256                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
2257         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2258                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2259         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2260                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2261         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2262                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2263 }
2264
2265 /**
2266  * Add UDP item to matcher and to the value.
2267  *
2268  * @param[in, out] matcher
2269  *   Flow matcher.
2270  * @param[in, out] key
2271  *   Flow matcher value.
2272  * @param[in] item
2273  *   Flow pattern to translate.
2274  * @param[in] inner
2275  *   Item is inner pattern.
2276  */
2277 static void
2278 flow_dv_translate_item_udp(void *matcher, void *key,
2279                            const struct rte_flow_item *item,
2280                            int inner)
2281 {
2282         const struct rte_flow_item_udp *udp_m = item->mask;
2283         const struct rte_flow_item_udp *udp_v = item->spec;
2284         void *headers_m;
2285         void *headers_v;
2286
2287         if (inner) {
2288                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2289                                          inner_headers);
2290                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2291         } else {
2292                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2293                                          outer_headers);
2294                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2295         }
2296         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2297         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2298         if (!udp_v)
2299                 return;
2300         if (!udp_m)
2301                 udp_m = &rte_flow_item_udp_mask;
2302         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2303                  rte_be_to_cpu_16(udp_m->hdr.src_port));
2304         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2305                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2306         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2307                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
2308         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2309                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2310 }
2311
2312 /**
2313  * Add GRE item to matcher and to the value.
2314  *
2315  * @param[in, out] matcher
2316  *   Flow matcher.
2317  * @param[in, out] key
2318  *   Flow matcher value.
2319  * @param[in] item
2320  *   Flow pattern to translate.
2321  * @param[in] inner
2322  *   Item is inner pattern.
2323  */
2324 static void
2325 flow_dv_translate_item_gre(void *matcher, void *key,
2326                            const struct rte_flow_item *item,
2327                            int inner)
2328 {
2329         const struct rte_flow_item_gre *gre_m = item->mask;
2330         const struct rte_flow_item_gre *gre_v = item->spec;
2331         void *headers_m;
2332         void *headers_v;
2333         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2334         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2335
2336         if (inner) {
2337                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2338                                          inner_headers);
2339                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2340         } else {
2341                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2342                                          outer_headers);
2343                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2344         }
2345         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2346         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2347         if (!gre_v)
2348                 return;
2349         if (!gre_m)
2350                 gre_m = &rte_flow_item_gre_mask;
2351         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2352                  rte_be_to_cpu_16(gre_m->protocol));
2353         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2354                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2355 }
2356
2357 /**
2358  * Add NVGRE item to matcher and to the value.
2359  *
2360  * @param[in, out] matcher
2361  *   Flow matcher.
2362  * @param[in, out] key
2363  *   Flow matcher value.
2364  * @param[in] item
2365  *   Flow pattern to translate.
2366  * @param[in] inner
2367  *   Item is inner pattern.
2368  */
2369 static void
2370 flow_dv_translate_item_nvgre(void *matcher, void *key,
2371                              const struct rte_flow_item *item,
2372                              int inner)
2373 {
2374         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2375         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2376         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2377         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2378         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2379         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2380         char *gre_key_m;
2381         char *gre_key_v;
2382         int size;
2383         int i;
2384
2385         flow_dv_translate_item_gre(matcher, key, item, inner);
2386         if (!nvgre_v)
2387                 return;
2388         if (!nvgre_m)
2389                 nvgre_m = &rte_flow_item_nvgre_mask;
2390         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2391         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2392         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2393         memcpy(gre_key_m, tni_flow_id_m, size);
2394         for (i = 0; i < size; ++i)
2395                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2396 }
2397
2398 /**
2399  * Add VXLAN item to matcher and to the value.
2400  *
2401  * @param[in, out] matcher
2402  *   Flow matcher.
2403  * @param[in, out] key
2404  *   Flow matcher value.
2405  * @param[in] item
2406  *   Flow pattern to translate.
2407  * @param[in] inner
2408  *   Item is inner pattern.
2409  */
2410 static void
2411 flow_dv_translate_item_vxlan(void *matcher, void *key,
2412                              const struct rte_flow_item *item,
2413                              int inner)
2414 {
2415         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2416         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2417         void *headers_m;
2418         void *headers_v;
2419         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2420         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2421         char *vni_m;
2422         char *vni_v;
2423         uint16_t dport;
2424         int size;
2425         int i;
2426
2427         if (inner) {
2428                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2429                                          inner_headers);
2430                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2431         } else {
2432                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2433                                          outer_headers);
2434                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2435         }
2436         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2437                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
2438         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2439                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2440                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
2441         }
2442         if (!vxlan_v)
2443                 return;
2444         if (!vxlan_m)
2445                 vxlan_m = &rte_flow_item_vxlan_mask;
2446         size = sizeof(vxlan_m->vni);
2447         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2448         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2449         memcpy(vni_m, vxlan_m->vni, size);
2450         for (i = 0; i < size; ++i)
2451                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
2452 }
2453
2454 /**
2455  * Add MPLS item to matcher and to the value.
2456  *
2457  * @param[in, out] matcher
2458  *   Flow matcher.
2459  * @param[in, out] key
2460  *   Flow matcher value.
2461  * @param[in] item
2462  *   Flow pattern to translate.
2463  * @param[in] prev_layer
2464  *   The protocol layer indicated in previous item.
2465  * @param[in] inner
2466  *   Item is inner pattern.
2467  */
static void
flow_dv_translate_item_mpls(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    uint64_t prev_layer,
			    int inner)
{
	const uint32_t *in_mpls_m = item->mask;
	const uint32_t *in_mpls_v = item->spec;
	uint32_t *out_mpls_m = 0;
	uint32_t *out_mpls_v = 0;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_2);
	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	/* Encapsulation matching below always targets the outer headers. */
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

	/*
	 * First pin the encapsulating protocol, so the rule only matches
	 * packets that can actually carry MPLS at this layer.
	 */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		/* MPLS-over-UDP: match the well-known MPLS UDP port. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 MLX5_UDP_PORT_MPLS);
		break;
	case MLX5_FLOW_LAYER_GRE:
		/* MPLS-over-GRE: match the MPLS ethertype in GRE. */
		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
			 ETHER_TYPE_MPLS);
		break;
	default:
		/* Otherwise treat it as MPLS directly over IP. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_MPLS);
		break;
	}
	/* Without a spec, only the encapsulation match above is programmed. */
	if (!in_mpls_v)
		return;
	if (!in_mpls_m)
		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
	/* Select the matcher field that corresponds to the encapsulation. */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_udp);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_udp);
		break;
	case MLX5_FLOW_LAYER_GRE:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_gre);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_gre);
		break;
	default:
		/* Inner MPLS not over GRE is not supported. */
		if (!inner) {
			out_mpls_m =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_m,
							 outer_first_mpls);
			out_mpls_v =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_v,
							 outer_first_mpls);
		}
		break;
	}
	/* Write the 32-bit label stack entry; value is pre-masked. */
	if (out_mpls_m && out_mpls_v) {
		*out_mpls_m = *in_mpls_m;
		*out_mpls_v = *in_mpls_v & *in_mpls_m;
	}
}
2543
2544 /**
2545  * Add META item to matcher
2546  *
2547  * @param[in, out] matcher
2548  *   Flow matcher.
2549  * @param[in, out] key
2550  *   Flow matcher value.
2551  * @param[in] item
2552  *   Flow pattern to translate.
2553  * @param[in] inner
2554  *   Item is inner pattern.
2555  */
2556 static void
2557 flow_dv_translate_item_meta(void *matcher, void *key,
2558                             const struct rte_flow_item *item)
2559 {
2560         const struct rte_flow_item_meta *meta_m;
2561         const struct rte_flow_item_meta *meta_v;
2562         void *misc2_m =
2563                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2564         void *misc2_v =
2565                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2566
2567         meta_m = (const void *)item->mask;
2568         if (!meta_m)
2569                 meta_m = &rte_flow_item_meta_mask;
2570         meta_v = (const void *)item->spec;
2571         if (meta_v) {
2572                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2573                          rte_be_to_cpu_32(meta_m->data));
2574                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2575                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
2576         }
2577 }
2578
/* All-zero reference buffer used to detect unused match criteria sections. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Evaluate to true when the given headers section of match_criteria is all zeroes. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
2584
2585 /**
2586  * Calculate flow matcher enable bitmap.
2587  *
2588  * @param match_criteria
2589  *   Pointer to flow matcher criteria.
2590  *
2591  * @return
2592  *   Bitmap of enabled fields.
2593  */
2594 static uint8_t
2595 flow_dv_matcher_enable(uint32_t *match_criteria)
2596 {
2597         uint8_t match_criteria_enable;
2598
2599         match_criteria_enable =
2600                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2601                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2602         match_criteria_enable |=
2603                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2604                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2605         match_criteria_enable |=
2606                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2607                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2608         match_criteria_enable |=
2609                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2610                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2611
2612         return match_criteria_enable;
2613 }
2614
2615 /**
2616  * Register the flow matcher.
2617  *
2618  * @param dev[in, out]
2619  *   Pointer to rte_eth_dev structure.
2620  * @param[in, out] matcher
2621  *   Pointer to flow matcher.
2622  * @parm[in, out] dev_flow
2623  *   Pointer to the dev_flow.
2624  * @param[out] error
2625  *   pointer to error structure.
2626  *
2627  * @return
2628  *   0 on success otherwise -errno and errno is set.
2629  */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *matcher,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_matcher *cache_matcher;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher->mask,
	};

	/* Lookup from cache. */
	LIST_FOREACH(cache_matcher, &priv->matchers, next) {
		/* A match is crc + priority + direction + exact mask bytes. */
		if (matcher->crc == cache_matcher->crc &&
		    matcher->priority == cache_matcher->priority &&
		    matcher->egress == cache_matcher->egress &&
		    !memcmp((const void *)matcher->mask.buf,
			    (const void *)cache_matcher->mask.buf,
			    cache_matcher->mask.size)) {
			DRV_LOG(DEBUG,
				"priority %hd use %s matcher %p: refcnt %d++",
				cache_matcher->priority,
				cache_matcher->egress ? "tx" : "rx",
				(void *)cache_matcher,
				rte_atomic32_read(&cache_matcher->refcnt));
			/* Reuse the cached object: just bump its refcount. */
			rte_atomic32_inc(&cache_matcher->refcnt);
			dev_flow->dv.matcher = cache_matcher;
			return 0;
		}
	}
	/* Register new matcher. */
	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
	if (!cache_matcher)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate matcher memory");
	*cache_matcher = *matcher;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache_matcher->mask.buf);
	dv_attr.priority = matcher->priority;
	if (matcher->egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	/* Create the device object through the glue layer. */
	cache_matcher->matcher_object =
		mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
	if (!cache_matcher->matcher_object) {
		/* Release the copy allocated above; nothing else to undo. */
		rte_free(cache_matcher);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create matcher");
	}
	rte_atomic32_inc(&cache_matcher->refcnt);
	LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
	dev_flow->dv.matcher = cache_matcher;
	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
		cache_matcher->priority,
		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
		rte_atomic32_read(&cache_matcher->refcnt));
	return 0;
}
2691
2692 /**
2693  * Fill the flow with DV spec.
2694  *
2695  * @param[in] dev
2696  *   Pointer to rte_eth_dev structure.
2697  * @param[in, out] dev_flow
2698  *   Pointer to the sub flow.
2699  * @param[in] attr
2700  *   Pointer to the flow attributes.
2701  * @param[in] items
2702  *   Pointer to the list of items.
2703  * @param[in] actions
2704  *   Pointer to the list of actions.
2705  * @param[out] error
2706  *   Pointer to the error structure.
2707  *
2708  * @return
2709  *   0 on success, a negative errno value otherwise and rte_ernno is set.
2710  */
2711 static int
2712 flow_dv_translate(struct rte_eth_dev *dev,
2713                   struct mlx5_flow *dev_flow,
2714                   const struct rte_flow_attr *attr,
2715                   const struct rte_flow_item items[],
2716                   const struct rte_flow_action actions[],
2717                   struct rte_flow_error *error)
2718 {
2719         struct priv *priv = dev->data->dev_private;
2720         struct rte_flow *flow = dev_flow->flow;
2721         uint64_t item_flags = 0;
2722         uint64_t last_item = 0;
2723         uint64_t action_flags = 0;
2724         uint64_t priority = attr->priority;
2725         struct mlx5_flow_dv_matcher matcher = {
2726                 .mask = {
2727                         .size = sizeof(matcher.mask.buf),
2728                 },
2729         };
2730         int actions_n = 0;
2731         bool actions_end = false;
2732         struct mlx5_flow_dv_modify_hdr_resource res = {
2733                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2734                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
2735         };
2736         union flow_dv_attr flow_attr = { .attr = 0 };
2737
2738         if (priority == MLX5_FLOW_PRIO_RSVD)
2739                 priority = priv->config.flow_prio - 1;
2740         for (; !actions_end ; actions++) {
2741                 const struct rte_flow_action_queue *queue;
2742                 const struct rte_flow_action_rss *rss;
2743                 const struct rte_flow_action *action = actions;
2744                 const uint8_t *rss_key;
2745
2746                 switch (actions->type) {
2747                 case RTE_FLOW_ACTION_TYPE_VOID:
2748                         break;
2749                 case RTE_FLOW_ACTION_TYPE_FLAG:
2750                         dev_flow->dv.actions[actions_n].type =
2751                                 MLX5DV_FLOW_ACTION_TAG;
2752                         dev_flow->dv.actions[actions_n].tag_value =
2753                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
2754                         actions_n++;
2755                         action_flags |= MLX5_FLOW_ACTION_FLAG;
2756                         break;
2757                 case RTE_FLOW_ACTION_TYPE_MARK:
2758                         dev_flow->dv.actions[actions_n].type =
2759                                 MLX5DV_FLOW_ACTION_TAG;
2760                         dev_flow->dv.actions[actions_n].tag_value =
2761                                 mlx5_flow_mark_set
2762                                 (((const struct rte_flow_action_mark *)
2763                                   (actions->conf))->id);
2764                         actions_n++;
2765                         action_flags |= MLX5_FLOW_ACTION_MARK;
2766                         break;
2767                 case RTE_FLOW_ACTION_TYPE_DROP:
2768                         dev_flow->dv.actions[actions_n].type =
2769                                 MLX5DV_FLOW_ACTION_DROP;
2770                         action_flags |= MLX5_FLOW_ACTION_DROP;
2771                         break;
2772                 case RTE_FLOW_ACTION_TYPE_QUEUE:
2773                         queue = actions->conf;
2774                         flow->rss.queue_num = 1;
2775                         (*flow->queue)[0] = queue->index;
2776                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
2777                         break;
2778                 case RTE_FLOW_ACTION_TYPE_RSS:
2779                         rss = actions->conf;
2780                         if (flow->queue)
2781                                 memcpy((*flow->queue), rss->queue,
2782                                        rss->queue_num * sizeof(uint16_t));
2783                         flow->rss.queue_num = rss->queue_num;
2784                         /* NULL RSS key indicates default RSS key. */
2785                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
2786                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
2787                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
2788                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
2789                         flow->rss.level = rss->level;
2790                         action_flags |= MLX5_FLOW_ACTION_RSS;
2791                         break;
2792                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2793                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2794                         if (flow_dv_create_action_l2_encap(dev, actions,
2795                                                            dev_flow, error))
2796                                 return -rte_errno;
2797                         dev_flow->dv.actions[actions_n].type =
2798                                 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2799                         dev_flow->dv.actions[actions_n].action =
2800                                 dev_flow->dv.encap_decap->verbs_action;
2801                         actions_n++;
2802                         action_flags |= actions->type ==
2803                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2804                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
2805                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
2806                         break;
2807                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2808                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2809                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
2810                                                            error))
2811                                 return -rte_errno;
2812                         dev_flow->dv.actions[actions_n].type =
2813                                 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2814                         dev_flow->dv.actions[actions_n].action =
2815                                 dev_flow->dv.encap_decap->verbs_action;
2816                         actions_n++;
2817                         action_flags |= actions->type ==
2818                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2819                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
2820                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
2821                         break;
2822                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2823                         /* Handle encap with preceding decap. */
2824                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
2825                                 if (flow_dv_create_action_raw_encap
2826                                         (dev, actions, dev_flow, attr, error))
2827                                         return -rte_errno;
2828                                 dev_flow->dv.actions[actions_n].type =
2829                                         MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2830                                 dev_flow->dv.actions[actions_n].action =
2831                                         dev_flow->dv.encap_decap->verbs_action;
2832                         } else {
2833                                 /* Handle encap without preceding decap. */
2834                                 if (flow_dv_create_action_l2_encap(dev, actions,
2835                                                                    dev_flow,
2836                                                                    error))
2837                                         return -rte_errno;
2838                                 dev_flow->dv.actions[actions_n].type =
2839                                         MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2840                                 dev_flow->dv.actions[actions_n].action =
2841                                         dev_flow->dv.encap_decap->verbs_action;
2842                         }
2843                         actions_n++;
2844                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2845                         break;
2846                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2847                         /* Check if this decap is followed by encap. */
2848                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
2849                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
2850                                action++) {
2851                         }
2852                         /* Handle decap only if it isn't followed by encap. */
2853                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2854                                 if (flow_dv_create_action_l2_decap(dev,
2855                                                                    dev_flow,
2856                                                                    error))
2857                                         return -rte_errno;
2858                                 dev_flow->dv.actions[actions_n].type =
2859                                         MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2860                                 dev_flow->dv.actions[actions_n].action =
2861                                         dev_flow->dv.encap_decap->verbs_action;
2862                                 actions_n++;
2863                         }
2864                         /* If decap is followed by encap, handle it at encap. */
2865                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2866                         break;
2867                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2868                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2869                         if (flow_dv_convert_action_modify_mac(&res, actions,
2870                                                               error))
2871                                 return -rte_errno;
2872                         action_flags |= actions->type ==
2873                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2874                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
2875                                         MLX5_FLOW_ACTION_SET_MAC_DST;
2876                         break;
2877                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2878                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2879                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
2880                                                                error))
2881                                 return -rte_errno;
2882                         action_flags |= actions->type ==
2883                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2884                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
2885                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
2886                         break;
2887                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2888                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2889                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
2890                                                                error))
2891                                 return -rte_errno;
2892                         action_flags |= actions->type ==
2893                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2894                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
2895                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
2896                         break;
2897                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2898                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2899                         if (flow_dv_convert_action_modify_tp(&res, actions,
2900                                                              items, &flow_attr,
2901                                                              error))
2902                                 return -rte_errno;
2903                         action_flags |= actions->type ==
2904                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2905                                         MLX5_FLOW_ACTION_SET_TP_SRC :
2906                                         MLX5_FLOW_ACTION_SET_TP_DST;
2907                         break;
2908                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2909                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
2910                                                                   &flow_attr,
2911                                                                   error))
2912                                 return -rte_errno;
2913                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
2914                         break;
2915                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2916                         if (flow_dv_convert_action_modify_ttl(&res, actions,
2917                                                              items, &flow_attr,
2918                                                              error))
2919                                 return -rte_errno;
2920                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
2921                         break;
2922                 case RTE_FLOW_ACTION_TYPE_END:
2923                         actions_end = true;
2924                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
2925                                 /* create modify action if needed. */
2926                                 if (flow_dv_modify_hdr_resource_register
2927                                                                 (dev, &res,
2928                                                                  dev_flow,
2929                                                                  error))
2930                                         return -rte_errno;
2931                                 dev_flow->dv.actions[actions_n].type =
2932                                         MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2933                                 dev_flow->dv.actions[actions_n].action =
2934                                         dev_flow->dv.modify_hdr->verbs_action;
2935                                 actions_n++;
2936                         }
2937                         break;
2938                 default:
2939                         break;
2940                 }
2941         }
2942         dev_flow->dv.actions_n = actions_n;
2943         flow->actions = action_flags;
2944         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2945                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2946                 void *match_mask = matcher.mask.buf;
2947                 void *match_value = dev_flow->dv.value.buf;
2948
2949                 switch (items->type) {
2950                 case RTE_FLOW_ITEM_TYPE_ETH:
2951                         flow_dv_translate_item_eth(match_mask, match_value,
2952                                                    items, tunnel);
2953                         matcher.priority = MLX5_PRIORITY_MAP_L2;
2954                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2955                                              MLX5_FLOW_LAYER_OUTER_L2;
2956                         break;
2957                 case RTE_FLOW_ITEM_TYPE_VLAN:
2958                         flow_dv_translate_item_vlan(match_mask, match_value,
2959                                                     items, tunnel);
2960                         matcher.priority = MLX5_PRIORITY_MAP_L2;
2961                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
2962                                               MLX5_FLOW_LAYER_INNER_VLAN) :
2963                                              (MLX5_FLOW_LAYER_OUTER_L2 |
2964                                               MLX5_FLOW_LAYER_OUTER_VLAN);
2965                         break;
2966                 case RTE_FLOW_ITEM_TYPE_IPV4:
2967                         flow_dv_translate_item_ipv4(match_mask, match_value,
2968                                                     items, tunnel);
2969                         matcher.priority = MLX5_PRIORITY_MAP_L3;
2970                         dev_flow->dv.hash_fields |=
2971                                 mlx5_flow_hashfields_adjust
2972                                         (dev_flow, tunnel,
2973                                          MLX5_IPV4_LAYER_TYPES,
2974                                          MLX5_IPV4_IBV_RX_HASH);
2975                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2976                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2977                         break;
2978                 case RTE_FLOW_ITEM_TYPE_IPV6:
2979                         flow_dv_translate_item_ipv6(match_mask, match_value,
2980                                                     items, tunnel);
2981                         matcher.priority = MLX5_PRIORITY_MAP_L3;
2982                         dev_flow->dv.hash_fields |=
2983                                 mlx5_flow_hashfields_adjust
2984                                         (dev_flow, tunnel,
2985                                          MLX5_IPV6_LAYER_TYPES,
2986                                          MLX5_IPV6_IBV_RX_HASH);
2987                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2988                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2989                         break;
2990                 case RTE_FLOW_ITEM_TYPE_TCP:
2991                         flow_dv_translate_item_tcp(match_mask, match_value,
2992                                                    items, tunnel);
2993                         matcher.priority = MLX5_PRIORITY_MAP_L4;
2994                         dev_flow->dv.hash_fields |=
2995                                 mlx5_flow_hashfields_adjust
2996                                         (dev_flow, tunnel, ETH_RSS_TCP,
2997                                          IBV_RX_HASH_SRC_PORT_TCP |
2998                                          IBV_RX_HASH_DST_PORT_TCP);
2999                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3000                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3001                         break;
3002                 case RTE_FLOW_ITEM_TYPE_UDP:
3003                         flow_dv_translate_item_udp(match_mask, match_value,
3004                                                    items, tunnel);
3005                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3006                         dev_flow->dv.hash_fields |=
3007                                 mlx5_flow_hashfields_adjust
3008                                         (dev_flow, tunnel, ETH_RSS_UDP,
3009                                          IBV_RX_HASH_SRC_PORT_UDP |
3010                                          IBV_RX_HASH_DST_PORT_UDP);
3011                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3012                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3013                         break;
3014                 case RTE_FLOW_ITEM_TYPE_GRE:
3015                         flow_dv_translate_item_gre(match_mask, match_value,
3016                                                    items, tunnel);
3017                         last_item = MLX5_FLOW_LAYER_GRE;
3018                         break;
3019                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3020                         flow_dv_translate_item_nvgre(match_mask, match_value,
3021                                                      items, tunnel);
3022                         last_item = MLX5_FLOW_LAYER_GRE;
3023                         break;
3024                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3025                         flow_dv_translate_item_vxlan(match_mask, match_value,
3026                                                      items, tunnel);
3027                         last_item = MLX5_FLOW_LAYER_VXLAN;
3028                         break;
3029                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3030                         flow_dv_translate_item_vxlan(match_mask, match_value,
3031                                                      items, tunnel);
3032                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3033                         break;
3034                 case RTE_FLOW_ITEM_TYPE_MPLS:
3035                         flow_dv_translate_item_mpls(match_mask, match_value,
3036                                                     items, last_item, tunnel);
3037                         last_item = MLX5_FLOW_LAYER_MPLS;
3038                         break;
3039                 case RTE_FLOW_ITEM_TYPE_META:
3040                         flow_dv_translate_item_meta(match_mask, match_value,
3041                                                     items);
3042                         last_item = MLX5_FLOW_ITEM_METADATA;
3043                         break;
3044                 default:
3045                         break;
3046                 }
3047                 item_flags |= last_item;
3048         }
3049         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3050                                          dev_flow->dv.value.buf));
3051         dev_flow->layers = item_flags;
3052         /* Register matcher. */
3053         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3054                                     matcher.mask.size);
3055         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3056                                                      matcher.priority);
3057         matcher.egress = attr->egress;
3058         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
3059                 return -rte_errno;
3060         return 0;
3061 }
3062
3063 /**
3064  * Apply the flow to the NIC.
3065  *
3066  * @param[in] dev
3067  *   Pointer to the Ethernet device structure.
3068  * @param[in, out] flow
3069  *   Pointer to flow structure.
3070  * @param[out] error
3071  *   Pointer to error structure.
3072  *
3073  * @return
3074  *   0 on success, a negative errno value otherwise and rte_errno is set.
3075  */
3076 static int
3077 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3078               struct rte_flow_error *error)
3079 {
3080         struct mlx5_flow_dv *dv;
3081         struct mlx5_flow *dev_flow;
3082         int n;
3083         int err;
3084
3085         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3086                 dv = &dev_flow->dv;
3087                 n = dv->actions_n;
3088                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3089                         dv->hrxq = mlx5_hrxq_drop_new(dev);
3090                         if (!dv->hrxq) {
3091                                 rte_flow_error_set
3092                                         (error, errno,
3093                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3094                                          "cannot get drop hash queue");
3095                                 goto error;
3096                         }
3097                         dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
3098                         dv->actions[n].qp = dv->hrxq->qp;
3099                         n++;
3100                 } else if (flow->actions &
3101                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3102                         struct mlx5_hrxq *hrxq;
3103
3104                         hrxq = mlx5_hrxq_get(dev, flow->key,
3105                                              MLX5_RSS_HASH_KEY_LEN,
3106                                              dv->hash_fields,
3107                                              (*flow->queue),
3108                                              flow->rss.queue_num);
3109                         if (!hrxq)
3110                                 hrxq = mlx5_hrxq_new
3111                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3112                                          dv->hash_fields, (*flow->queue),
3113                                          flow->rss.queue_num,
3114                                          !!(dev_flow->layers &
3115                                             MLX5_FLOW_LAYER_TUNNEL));
3116                         if (!hrxq) {
3117                                 rte_flow_error_set
3118                                         (error, rte_errno,
3119                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3120                                          "cannot get hash queue");
3121                                 goto error;
3122                         }
3123                         dv->hrxq = hrxq;
3124                         dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
3125                         dv->actions[n].qp = hrxq->qp;
3126                         n++;
3127                 }
3128                 dv->flow =
3129                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3130                                                   (void *)&dv->value, n,
3131                                                   dv->actions);
3132                 if (!dv->flow) {
3133                         rte_flow_error_set(error, errno,
3134                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3135                                            NULL,
3136                                            "hardware refuses to create flow");
3137                         goto error;
3138                 }
3139         }
3140         return 0;
3141 error:
3142         err = rte_errno; /* Save rte_errno before cleanup. */
3143         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3144                 struct mlx5_flow_dv *dv = &dev_flow->dv;
3145                 if (dv->hrxq) {
3146                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
3147                                 mlx5_hrxq_drop_release(dev);
3148                         else
3149                                 mlx5_hrxq_release(dev, dv->hrxq);
3150                         dv->hrxq = NULL;
3151                 }
3152         }
3153         rte_errno = err; /* Restore rte_errno. */
3154         return -rte_errno;
3155 }
3156
3157 /**
3158  * Release the flow matcher.
3159  *
3160  * @param dev
3161  *   Pointer to Ethernet device.
3162  * @param flow
3163  *   Pointer to mlx5_flow.
3164  *
3165  * @return
3166  *   1 while a reference on it exists, 0 when freed.
3167  */
3168 static int
3169 flow_dv_matcher_release(struct rte_eth_dev *dev,
3170                         struct mlx5_flow *flow)
3171 {
3172         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3173
3174         assert(matcher->matcher_object);
3175         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3176                 dev->data->port_id, (void *)matcher,
3177                 rte_atomic32_read(&matcher->refcnt));
3178         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3179                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3180                            (matcher->matcher_object));
3181                 LIST_REMOVE(matcher, next);
3182                 rte_free(matcher);
3183                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3184                         dev->data->port_id, (void *)matcher);
3185                 return 0;
3186         }
3187         return 1;
3188 }
3189
3190 /**
3191  * Release an encap/decap resource.
3192  *
3193  * @param flow
3194  *   Pointer to mlx5_flow.
3195  *
3196  * @return
3197  *   1 while a reference on it exists, 0 when freed.
3198  */
3199 static int
3200 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3201 {
3202         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3203                                                 flow->dv.encap_decap;
3204
3205         assert(cache_resource->verbs_action);
3206         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3207                 (void *)cache_resource,
3208                 rte_atomic32_read(&cache_resource->refcnt));
3209         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3210                 claim_zero(mlx5_glue->destroy_flow_action
3211                                 (cache_resource->verbs_action));
3212                 LIST_REMOVE(cache_resource, next);
3213                 rte_free(cache_resource);
3214                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3215                         (void *)cache_resource);
3216                 return 0;
3217         }
3218         return 1;
3219 }
3220
3221 /**
3222  * Release a modify-header resource.
3223  *
3224  * @param flow
3225  *   Pointer to mlx5_flow.
3226  *
3227  * @return
3228  *   1 while a reference on it exists, 0 when freed.
3229  */
3230 static int
3231 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3232 {
3233         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3234                                                 flow->dv.modify_hdr;
3235
3236         assert(cache_resource->verbs_action);
3237         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3238                 (void *)cache_resource,
3239                 rte_atomic32_read(&cache_resource->refcnt));
3240         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3241                 claim_zero(mlx5_glue->destroy_flow_action
3242                                 (cache_resource->verbs_action));
3243                 LIST_REMOVE(cache_resource, next);
3244                 rte_free(cache_resource);
3245                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3246                         (void *)cache_resource);
3247                 return 0;
3248         }
3249         return 1;
3250 }
3251
3252 /**
3253  * Remove the flow from the NIC but keeps it in memory.
3254  *
3255  * @param[in] dev
3256  *   Pointer to Ethernet device.
3257  * @param[in, out] flow
3258  *   Pointer to flow structure.
3259  */
3260 static void
3261 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3262 {
3263         struct mlx5_flow_dv *dv;
3264         struct mlx5_flow *dev_flow;
3265
3266         if (!flow)
3267                 return;
3268         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3269                 dv = &dev_flow->dv;
3270                 if (dv->flow) {
3271                         claim_zero(mlx5_glue->destroy_flow(dv->flow));
3272                         dv->flow = NULL;
3273                 }
3274                 if (dv->hrxq) {
3275                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
3276                                 mlx5_hrxq_drop_release(dev);
3277                         else
3278                                 mlx5_hrxq_release(dev, dv->hrxq);
3279                         dv->hrxq = NULL;
3280                 }
3281         }
3282         if (flow->counter)
3283                 flow->counter = NULL;
3284 }
3285
3286 /**
3287  * Remove the flow from the NIC and the memory.
3288  *
3289  * @param[in] dev
3290  *   Pointer to the Ethernet device structure.
3291  * @param[in, out] flow
3292  *   Pointer to flow structure.
3293  */
3294 static void
3295 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3296 {
3297         struct mlx5_flow *dev_flow;
3298
3299         if (!flow)
3300                 return;
3301         flow_dv_remove(dev, flow);
3302         while (!LIST_EMPTY(&flow->dev_flows)) {
3303                 dev_flow = LIST_FIRST(&flow->dev_flows);
3304                 LIST_REMOVE(dev_flow, next);
3305                 if (dev_flow->dv.matcher)
3306                         flow_dv_matcher_release(dev, dev_flow);
3307                 if (dev_flow->dv.encap_decap)
3308                         flow_dv_encap_decap_resource_release(dev_flow);
3309                 if (dev_flow->dv.modify_hdr)
3310                         flow_dv_modify_hdr_resource_release(dev_flow);
3311                 rte_free(dev_flow);
3312         }
3313 }
3314
3315 /**
3316  * Query a flow.
3317  *
3318  * @see rte_flow_query()
3319  * @see rte_flow_ops
3320  */
3321 static int
3322 flow_dv_query(struct rte_eth_dev *dev __rte_unused,
3323               struct rte_flow *flow __rte_unused,
3324               const struct rte_flow_action *actions __rte_unused,
3325               void *data __rte_unused,
3326               struct rte_flow_error *error __rte_unused)
3327 {
3328         return rte_flow_error_set(error, ENOTSUP,
3329                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3330                                   NULL,
3331                                   "flow query with DV is not supported");
3332 }
3333
3334
/* Driver ops table plugging the Direct Verbs flow engine into the
 * generic mlx5 flow layer (see struct mlx5_flow_driver_ops).
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
        .query = flow_dv_query,
};
3344
3345 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */