net/mlx5: add flow translation of eCPRI header
drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>

#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);
/**
 * Initialize the flow attributes structure according to the flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is a suffix flow
         * and the layer flags were set by the prefix flow. The layer flags
         * from the prefix flow must be reused because the suffix flow may
         * not carry the user-defined items after the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
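
/*
 * Illustrative example, not part of the upstream driver: for a fresh
 * sub flow (dev_flow->handle->layers == 0) matched with the pattern
 *
 *   const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *
 * flow_dv_attr_init(pattern, &attr, dev_flow, false) would set
 * attr->ipv4 = 1, attr->udp = 1 and attr->valid = 1, leaving
 * attr->ipv6 and attr->tcp at 0.
 */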

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to the shared dv context. Locking occurs only if the context is
 * actually shared, i.e. we have a multiport IB device and
 * representors are created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

/* Update the VLAN's VID/PCP based on the input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
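
/*
 * Illustrative example, not part of the upstream driver: for the
 * action RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP with
 *
 *   struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 5 };
 *
 * the PCP value is shifted into place as 5 << 13 = 0xa000, the old
 * PCP bits are cleared via ~MLX5DV_FLOW_VLAN_PCP_MASK and the new
 * ones OR-ed in, leaving the 12-bit VID field untouched.
 */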

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
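
/*
 * Illustrative example, not part of the upstream driver: for
 * data = {0x12, 0x34, 0x56} and size = 3 the function returns
 * 0x123456 on any host: the first two bytes are read as a
 * big-endian 16-bit word (0x1234), shifted left by 8 and OR-ed
 * with the third byte.
 */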

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action is determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored, the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present, it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
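
/*
 * Illustrative example, not part of the upstream driver, of the
 * width deduction above: a fetched mask of 0x00fff000 yields
 * off_b = rte_bsf32(0x00fff000) = 12 and
 * size_b = 32 - 12 - __builtin_clz(0x00fff000) = 32 - 12 - 8 = 12,
 * i.e. a command patching a 12-bit field at bit offset 12.
 */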

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only an increment one.
                 * To simulate decrementing Y by X with the increment
                 * operation we add UINT32_MAX X times to Y; each addition
                 * of UINT32_MAX decrements Y by 1 (modulo 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
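
/*
 * Illustrative example, not part of the upstream driver, of the
 * wrap-around trick above: for RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
 * with a configured value of 3, value = 3 * UINT32_MAX truncates
 * to 0xfffffffd, and adding that to the 32-bit sequence number is
 * the same as subtracting 3.
 */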

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only an increment one.
                 * To simulate decrementing Y by X with the increment
                 * operation we add UINT32_MAX X times to Y; each addition
                 * of UINT32_MAX decrements Y by 1 (modulo 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NONE] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NONE);
        MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NONE);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there is
                         * no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
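
/*
 * Illustrative example, not part of the upstream driver: on a
 * little-endian host with dv_regc0_mask = 0xffff0000 and
 * dst == REG_C_0, the code above computes
 * reg_dst.offset = rte_bsf32(0xffff0000) = 16 and
 * mask = 0xffff0000 << (32 - rte_fls_u32(0xffff0000)) = 0xffff0000,
 * so the copy only touches the 16 usable bits of reg_c[0].
 */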

/**
 * Convert MARK action to DV specification. This routine is used
 * in extended metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
                {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0].id = reg_to_field[reg];
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
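
/*
 * Illustrative example, not part of the upstream driver: if the MARK
 * value is mapped to REG_C_0 and dv_regc0_mask = 0xffff0000, then
 * shl_c0 = 16 and a mark id of 0x5 occupies bits 16 and upwards of
 * the register, with the mask narrowed to the bits the firmware
 * reported as usable.
 */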

/**
 * Get the metadata register index for the specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        /*
         * In the datapath code there are no endianness conversions
         * for performance reasons; all pattern conversions are done
         * in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bit offset of IPv6 is not byte aligned,
         * rdma-core only accepts byte-aligned DSCP bits in positions
         * 0 to 5, to be compatible with IPv4. There is no need to shift
         * the bits in the IPv6 case as rdma-core requires a byte-aligned
         * value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
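
/*
 * Illustrative arithmetic, not part of the upstream driver, for the
 * two DSCP masks above (assuming the DPDK definitions
 * RTE_IPV4_HDR_DSCP_MASK = 0xfc and RTE_IPV6_HDR_DSCP_MASK =
 * 0x0fc00000):
 *
 *   0xfc >> 2        == 0x3f
 *   0x0fc00000 >> 22 == 0x3f
 *
 * so in both cases the 6 DSCP bits end up right-aligned in the byte
 * that rdma-core consumes.
 */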
1359
1360 /**
1361  * Validate MARK item.
1362  *
1363  * @param[in] dev
1364  *   Pointer to the rte_eth_dev structure.
1365  * @param[in] item
1366  *   Item specification.
1367  * @param[in] attr
1368  *   Attributes of flow that includes this item.
1369  * @param[out] error
1370  *   Pointer to error structure.
1371  *
1372  * @return
1373  *   0 on success, a negative errno value otherwise and rte_errno is set.
1374  */
1375 static int
1376 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1377                            const struct rte_flow_item *item,
1378                            const struct rte_flow_attr *attr __rte_unused,
1379                            struct rte_flow_error *error)
1380 {
1381         struct mlx5_priv *priv = dev->data->dev_private;
1382         struct mlx5_dev_config *config = &priv->config;
1383         const struct rte_flow_item_mark *spec = item->spec;
1384         const struct rte_flow_item_mark *mask = item->mask;
1385         const struct rte_flow_item_mark nic_mask = {
1386                 .id = priv->sh->dv_mark_mask,
1387         };
1388         int ret;
1389
1390         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1391                 return rte_flow_error_set(error, ENOTSUP,
1392                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1393                                           "extended metadata feature"
1394                                           " isn't enabled");
1395         if (!mlx5_flow_ext_mreg_supported(dev))
1396                 return rte_flow_error_set(error, ENOTSUP,
1397                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1398                                           "extended metadata register"
1399                                           " isn't supported");
1400         if (!nic_mask.id)
1401                 return rte_flow_error_set(error, ENOTSUP,
1402                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1403                                           "extended metadata register"
1404                                           " isn't available");
1405         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1406         if (ret < 0)
1407                 return ret;
1408         if (!spec)
1409                 return rte_flow_error_set(error, EINVAL,
1410                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1411                                           item->spec,
1412                                           "data cannot be empty");
1413         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1414                 return rte_flow_error_set(error, EINVAL,
1415                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1416                                           &spec->id,
1417                                           "mark id exceeds the limit");
1418         if (!mask)
1419                 mask = &nic_mask;
1420         if (!mask->id)
1421                 return rte_flow_error_set(error, EINVAL,
1422                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1423                                         "mask cannot be zero");
1424
1425         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1426                                         (const uint8_t *)&nic_mask,
1427                                         sizeof(struct rte_flow_item_mark),
1428                                         error);
1429         if (ret < 0)
1430                 return ret;
1431         return 0;
1432 }
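/*
 * Illustrative use of the MARK item validated above (a sketch in
 * testpmd syntax, not taken from this file; requires dv_xmeta_en != 0
 * so the mark value travels in a metadata register):
 *
 *   flow create 0 ingress pattern mark id is 42 / end
 *        actions queue index 1 / end
 */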
1433
1434 /**
1435  * Validate META item.
1436  *
1437  * @param[in] dev
1438  *   Pointer to the rte_eth_dev structure.
1439  * @param[in] item
1440  *   Item specification.
1441  * @param[in] attr
1442  *   Attributes of flow that includes this item.
1443  * @param[out] error
1444  *   Pointer to error structure.
1445  *
1446  * @return
1447  *   0 on success, a negative errno value otherwise and rte_errno is set.
1448  */
1449 static int
1450 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1451                            const struct rte_flow_item *item,
1452                            const struct rte_flow_attr *attr,
1453                            struct rte_flow_error *error)
1454 {
1455         struct mlx5_priv *priv = dev->data->dev_private;
1456         struct mlx5_dev_config *config = &priv->config;
1457         const struct rte_flow_item_meta *spec = item->spec;
1458         const struct rte_flow_item_meta *mask = item->mask;
1459         struct rte_flow_item_meta nic_mask = {
1460                 .data = UINT32_MAX
1461         };
1462         int reg;
1463         int ret;
1464
1465         if (!spec)
1466                 return rte_flow_error_set(error, EINVAL,
1467                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1468                                           item->spec,
1469                                           "data cannot be empty");
1470         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1471                 if (!mlx5_flow_ext_mreg_supported(dev))
1472                         return rte_flow_error_set(error, ENOTSUP,
1473                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1474                                           "extended metadata register"
1475                                           " isn't supported");
1476                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1477                 if (reg < 0)
1478                         return reg;
1479                 if (reg == REG_B)
1480                         return rte_flow_error_set(error, ENOTSUP,
1481                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1482                                           "match on reg_b "
1483                                           "isn't supported");
1484                 if (reg != REG_A)
1485                         nic_mask.data = priv->sh->dv_meta_mask;
1486         } else if (attr->transfer) {
1487                 return rte_flow_error_set(error, ENOTSUP,
1488                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1489                                         "extended metadata feature "
1490                                         "should be enabled when "
1491                                         "meta item is requested "
1492                                         "with e-switch mode");
1493         }
1494         if (!mask)
1495                 mask = &rte_flow_item_meta_mask;
1496         if (!mask->data)
1497                 return rte_flow_error_set(error, EINVAL,
1498                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1499                                         "mask cannot be zero");
1500
1501         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1502                                         (const uint8_t *)&nic_mask,
1503                                         sizeof(struct rte_flow_item_meta),
1504                                         error);
1505         return ret;
1506 }
1507
1508 /**
1509  * Validate TAG item.
1510  *
1511  * @param[in] dev
1512  *   Pointer to the rte_eth_dev structure.
1513  * @param[in] item
1514  *   Item specification.
1515  * @param[in] attr
1516  *   Attributes of flow that includes this item.
1517  * @param[out] error
1518  *   Pointer to error structure.
1519  *
1520  * @return
1521  *   0 on success, a negative errno value otherwise and rte_errno is set.
1522  */
1523 static int
1524 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1525                           const struct rte_flow_item *item,
1526                           const struct rte_flow_attr *attr __rte_unused,
1527                           struct rte_flow_error *error)
1528 {
1529         const struct rte_flow_item_tag *spec = item->spec;
1530         const struct rte_flow_item_tag *mask = item->mask;
1531         const struct rte_flow_item_tag nic_mask = {
1532                 .data = RTE_BE32(UINT32_MAX),
1533                 .index = 0xff,
1534         };
1535         int ret;
1536
1537         if (!mlx5_flow_ext_mreg_supported(dev))
1538                 return rte_flow_error_set(error, ENOTSUP,
1539                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1540                                           "extended metadata register"
1541                                           " isn't supported");
1542         if (!spec)
1543                 return rte_flow_error_set(error, EINVAL,
1544                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1545                                           item->spec,
1546                                           "data cannot be empty");
1547         if (!mask)
1548                 mask = &rte_flow_item_tag_mask;
1549         if (!mask->data)
1550                 return rte_flow_error_set(error, EINVAL,
1551                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1552                                         "mask cannot be zero");
1553
1554         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1555                                         (const uint8_t *)&nic_mask,
1556                                         sizeof(struct rte_flow_item_tag),
1557                                         error);
1558         if (ret < 0)
1559                 return ret;
1560         if (mask->index != 0xff)
1561                 return rte_flow_error_set(error, EINVAL,
1562                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1563                                           "partial mask for tag index"
1564                                           " is not supported");
1565         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1566         if (ret < 0)
1567                 return ret;
1568         MLX5_ASSERT(ret != REG_NONE);
1569         return 0;
1570 }
1571
1572 /**
1573  * Validate port_id (vport) item.
1574  *
1575  * @param[in] dev
1576  *   Pointer to the rte_eth_dev structure.
1577  * @param[in] item
1578  *   Item specification.
1579  * @param[in] attr
1580  *   Attributes of flow that includes this item.
1581  * @param[in] item_flags
1582  *   Bit-fields that holds the items detected until now.
1583  * @param[out] error
1584  *   Pointer to error structure.
1585  *
1586  * @return
1587  *   0 on success, a negative errno value otherwise and rte_errno is set.
1588  */
1589 static int
1590 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1591                               const struct rte_flow_item *item,
1592                               const struct rte_flow_attr *attr,
1593                               uint64_t item_flags,
1594                               struct rte_flow_error *error)
1595 {
1596         const struct rte_flow_item_port_id *spec = item->spec;
1597         const struct rte_flow_item_port_id *mask = item->mask;
1598         const struct rte_flow_item_port_id switch_mask = {
1599                         .id = 0xffffffff,
1600         };
1601         struct mlx5_priv *esw_priv;
1602         struct mlx5_priv *dev_priv;
1603         int ret;
1604
1605         if (!attr->transfer)
1606                 return rte_flow_error_set(error, EINVAL,
1607                                           RTE_FLOW_ERROR_TYPE_ITEM,
1608                                           NULL,
1609                                           "match on port id is valid only"
1610                                           " when transfer flag is enabled");
1611         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1612                 return rte_flow_error_set(error, ENOTSUP,
1613                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1614                                           "multiple source ports are not"
1615                                           " supported");
1616         if (!mask)
1617                 mask = &switch_mask;
1618         if (mask->id != 0xffffffff)
1619                 return rte_flow_error_set(error, ENOTSUP,
1620                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1621                                            mask,
1622                                            "no support for partial mask on"
1623                                            " \"id\" field");
1624         ret = mlx5_flow_item_acceptable
1625                                 (item, (const uint8_t *)mask,
1626                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1627                                  sizeof(struct rte_flow_item_port_id),
1628                                  error);
1629         if (ret)
1630                 return ret;
1631         if (!spec)
1632                 return 0;
1633         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1634         if (!esw_priv)
1635                 return rte_flow_error_set(error, rte_errno,
1636                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1637                                           "failed to obtain E-Switch info for"
1638                                           " port");
1639         dev_priv = mlx5_dev_to_eswitch_info(dev);
1640         if (!dev_priv)
1641                 return rte_flow_error_set(error, rte_errno,
1642                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1643                                           NULL,
1644                                           "failed to obtain E-Switch info");
1645         if (esw_priv->domain_id != dev_priv->domain_id)
1646                 return rte_flow_error_set(error, EINVAL,
1647                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1648                                           "cannot match on a port from a"
1649                                           " different E-Switch");
1650         return 0;
1651 }
1652
1653 /**
1654  * Validate VLAN item.
1655  *
1656  * @param[in] item
1657  *   Item specification.
1658  * @param[in] item_flags
1659  *   Bit-fields that holds the items detected until now.
1660  * @param[in] dev
1661  *   Ethernet device flow is being created on.
1662  * @param[out] error
1663  *   Pointer to error structure.
1664  *
1665  * @return
1666  *   0 on success, a negative errno value otherwise and rte_errno is set.
1667  */
1668 static int
1669 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1670                            uint64_t item_flags,
1671                            struct rte_eth_dev *dev,
1672                            struct rte_flow_error *error)
1673 {
1674         const struct rte_flow_item_vlan *mask = item->mask;
1675         const struct rte_flow_item_vlan nic_mask = {
1676                 .tci = RTE_BE16(UINT16_MAX),
1677                 .inner_type = RTE_BE16(UINT16_MAX),
1678         };
1679         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1680         int ret;
1681         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1682                                         MLX5_FLOW_LAYER_INNER_L4) :
1683                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1684                                         MLX5_FLOW_LAYER_OUTER_L4);
1685         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1686                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1687
1688         if (item_flags & vlanm)
1689                 return rte_flow_error_set(error, EINVAL,
1690                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1691                                           "multiple VLAN layers not supported");
1692         else if ((item_flags & l34m) != 0)
1693                 return rte_flow_error_set(error, EINVAL,
1694                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1695                                           "VLAN cannot follow L3/L4 layer");
1696         if (!mask)
1697                 mask = &rte_flow_item_vlan_mask;
1698         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1699                                         (const uint8_t *)&nic_mask,
1700                                         sizeof(struct rte_flow_item_vlan),
1701                                         error);
1702         if (ret)
1703                 return ret;
1704         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1705                 struct mlx5_priv *priv = dev->data->dev_private;
1706
1707                 if (priv->vmwa_context) {
1708                         /*
1709                          * Non-NULL context means we have a virtual machine
1710                          * and SR-IOV enabled, we have to create VLAN interface
1711                          * to make hypervisor to setup E-Switch vport
1712                          * context correctly. We avoid creating the multiple
1713                          * VLAN interfaces, so we cannot support VLAN tag mask.
1714                          */
1715                         return rte_flow_error_set(error, EINVAL,
1716                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1717                                                   item,
1718                                                   "VLAN tag mask is not"
1719                                                   " supported in virtual"
1720                                                   " environment");
1721                 }
1722         }
1723         return 0;
1724 }
1725
1726 /*
1727  * GTP flags are contained in 1 byte of the format:
1728  * -------------------------------------------
1729  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1730  * |-----------------------------------------|
1731  * | value | Version | PT | Res | E | S | PN |
1732  * -------------------------------------------
1733  *
1734  * Matching is supported only for GTP flags E, S, PN.
1735  */
1736 #define MLX5_GTP_FLAGS_MASK     0x07
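
/*
 * Example: a flags byte of 0x32 decodes as version 1, PT = 1, E = 0,
 * S = 1, PN = 0. Only the three least significant bits (E, S, PN,
 * i.e. MLX5_GTP_FLAGS_MASK) can be matched, so a spec with version or
 * PT bits set is rejected by the validation below.
 */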
1737
1738 /**
1739  * Validate GTP item.
1740  *
1741  * @param[in] dev
1742  *   Pointer to the rte_eth_dev structure.
1743  * @param[in] item
1744  *   Item specification.
1745  * @param[in] item_flags
1746  *   Bit-fields that holds the items detected until now.
1747  * @param[out] error
1748  *   Pointer to error structure.
1749  *
1750  * @return
1751  *   0 on success, a negative errno value otherwise and rte_errno is set.
1752  */
1753 static int
1754 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1755                           const struct rte_flow_item *item,
1756                           uint64_t item_flags,
1757                           struct rte_flow_error *error)
1758 {
1759         struct mlx5_priv *priv = dev->data->dev_private;
1760         const struct rte_flow_item_gtp *spec = item->spec;
1761         const struct rte_flow_item_gtp *mask = item->mask;
1762         const struct rte_flow_item_gtp nic_mask = {
1763                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1764                 .msg_type = 0xff,
1765                 .teid = RTE_BE32(0xffffffff),
1766         };
1767
1768         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1769                 return rte_flow_error_set(error, ENOTSUP,
1770                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1771                                           "GTP support is not enabled");
1772         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1773                 return rte_flow_error_set(error, ENOTSUP,
1774                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1775                                           "multiple tunnel layers not"
1776                                           " supported");
1777         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1778                 return rte_flow_error_set(error, EINVAL,
1779                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1780                                           "no outer UDP layer found");
1781         if (!mask)
1782                 mask = &rte_flow_item_gtp_mask;
1783         if (spec && (spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK))
1784                 return rte_flow_error_set(error, ENOTSUP,
1785                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1786                                           "Match is supported for GTP"
1787                                           " flags only");
1788         return mlx5_flow_item_acceptable
1789                 (item, (const uint8_t *)mask,
1790                  (const uint8_t *)&nic_mask,
1791                  sizeof(struct rte_flow_item_gtp),
1792                  error);
1793 }
1794
1795 /**
1796  * Validate the pop VLAN action.
1797  *
1798  * @param[in] dev
1799  *   Pointer to the rte_eth_dev structure.
1800  * @param[in] action_flags
1801  *   Holds the actions detected until now.
1802  * @param[in] action
1803  *   Pointer to the pop vlan action.
1804  * @param[in] item_flags
1805  *   The items found in this flow rule.
1806  * @param[in] attr
1807  *   Pointer to flow attributes.
1808  * @param[out] error
1809  *   Pointer to error structure.
1810  *
1811  * @return
1812  *   0 on success, a negative errno value otherwise and rte_errno is set.
1813  */
1814 static int
1815 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
1816                                  uint64_t action_flags,
1817                                  const struct rte_flow_action *action,
1818                                  uint64_t item_flags,
1819                                  const struct rte_flow_attr *attr,
1820                                  struct rte_flow_error *error)
1821 {
1822         const struct mlx5_priv *priv = dev->data->dev_private;
1823
1826         if (!priv->sh->pop_vlan_action)
1827                 return rte_flow_error_set(error, ENOTSUP,
1828                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1829                                           NULL,
1830                                           "pop vlan action is not supported");
1831         if (attr->egress)
1832                 return rte_flow_error_set(error, ENOTSUP,
1833                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1834                                           NULL,
1835                                           "pop vlan action not supported for "
1836                                           "egress");
1837         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1838                 return rte_flow_error_set(error, ENOTSUP,
1839                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1840                                           "no support for multiple VLAN "
1841                                           "actions");
1842         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1843                 return rte_flow_error_set(error, ENOTSUP,
1844                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1845                                           NULL,
1846                                           "cannot pop vlan without a "
1847                                           "match on (outer) vlan in the flow");
1848         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1849                 return rte_flow_error_set(error, EINVAL,
1850                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1851                                           "wrong action order, port_id should "
1852                                           "be after pop VLAN action");
1853         if (!attr->transfer && priv->representor)
1854                 return rte_flow_error_set(error, ENOTSUP,
1855                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1856                                           "pop vlan action for VF representor "
1857                                           "not supported on NIC table");
1858         return 0;
1859 }
1860
1861 /**
1862  * Get VLAN default info from vlan match info.
1863  *
1864  * @param[in] items
1865  *   The list of item specifications.
1866  * @param[out] vlan
1867  *   Pointer to the VLAN info to fill.
1871  */
1872 static void
1873 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1874                                   struct rte_vlan_hdr *vlan)
1875 {
1876         const struct rte_flow_item_vlan nic_mask = {
1877                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1878                                 MLX5DV_FLOW_VLAN_VID_MASK),
1879                 .inner_type = RTE_BE16(0xffff),
1880         };
1881
1882         if (items == NULL)
1883                 return;
1884         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1885                 int type = items->type;
1886
1887                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
1888                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
1889                         break;
1890         }
1891         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
1892                 const struct rte_flow_item_vlan *vlan_m = items->mask;
1893                 const struct rte_flow_item_vlan *vlan_v = items->spec;
1894
1895                 /* If VLAN item in pattern doesn't contain data, return here. */
1896                 if (!vlan_v)
1897                         return;
1898                 if (!vlan_m)
1899                         vlan_m = &nic_mask;
1900                 /* Only full match values are accepted */
1901                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1902                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1903                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
1904                         vlan->vlan_tci |=
1905                                 rte_be_to_cpu_16(vlan_v->tci &
1906                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1907                 }
1908                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1909                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1910                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1911                         vlan->vlan_tci |=
1912                                 rte_be_to_cpu_16(vlan_v->tci &
1913                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
1914                 }
1915                 if (vlan_m->inner_type == nic_mask.inner_type)
1916                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1917                                                            vlan_m->inner_type);
1918         }
1919 }
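
/*
 * Usage sketch (hypothetical caller, not from this file): with a
 * pattern VLAN spec of tci = 0xe00a under a full PCP+VID mask, the
 * helper above rewrites vlan->vlan_tci to carry PCP 7 (0xe000) and
 * VID 10 (0x00a); under a partial mask the corresponding default
 * bits in *vlan are left untouched.
 */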
1920
1921 /**
1922  * Validate the push VLAN action.
1923  *
1924  * @param[in] dev
1925  *   Pointer to the rte_eth_dev structure.
1926  * @param[in] action_flags
1927  *   Holds the actions detected until now.
1928  * @param[in] vlan_m
1929  *   Pointer to the VLAN item mask in the pattern, can be NULL.
1930  * @param[in] action
1931  *   Pointer to the action structure.
1932  * @param[in] attr
1933  *   Pointer to flow attributes
1934  * @param[out] error
1935  *   Pointer to error structure.
1936  *
1937  * @return
1938  *   0 on success, a negative errno value otherwise and rte_errno is set.
1939  */
1940 static int
1941 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
1942                                   uint64_t action_flags,
1943                                   const struct rte_flow_item_vlan *vlan_m,
1944                                   const struct rte_flow_action *action,
1945                                   const struct rte_flow_attr *attr,
1946                                   struct rte_flow_error *error)
1947 {
1948         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1949         const struct mlx5_priv *priv = dev->data->dev_private;
1950
1951         if (!attr->transfer && attr->ingress)
1952                 return rte_flow_error_set(error, ENOTSUP,
1953                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1954                                           NULL,
1955                                           "push VLAN action not supported for "
1956                                           "ingress");
1957         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1958             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1959                 return rte_flow_error_set(error, EINVAL,
1960                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1961                                           "invalid vlan ethertype");
1962         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1963                 return rte_flow_error_set(error, ENOTSUP,
1964                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1965                                           "no support for multiple VLAN "
1966                                           "actions");
1967         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1968                 return rte_flow_error_set(error, EINVAL,
1969                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1970                                           "wrong action order, port_id should "
1971                                           "be after push VLAN");
1972         if (!attr->transfer && priv->representor)
1973                 return rte_flow_error_set(error, ENOTSUP,
1974                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1975                                           "push vlan action for VF representor "
1976                                           "not supported on NIC table");
1977         if (vlan_m &&
1978             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
1979             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
1980                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
1981             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
1982             !(mlx5_flow_find_action
1983                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
1984                 return rte_flow_error_set(error, EINVAL,
1985                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1986                                           "not full match mask on VLAN PCP and "
1987                                           "there is no of_set_vlan_pcp action, "
1988                                           "push VLAN action cannot figure out "
1989                                           "PCP value");
1990         if (vlan_m &&
1991             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
1992             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
1993                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
1994             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
1995             !(mlx5_flow_find_action
1996                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
1997                 return rte_flow_error_set(error, EINVAL,
1998                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1999                                           "not full match mask on VLAN VID and "
2000                                           "there is no of_set_vlan_vid action, "
2001                                           "push VLAN action cannot figure out "
2002                                           "VID value");
2004         return 0;
2005 }
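
/*
 * Example of the PCP check above: a flow that matched VLAN with the
 * partial PCP mask 0x2000 (rather than the full 0xe000) can only push
 * a VLAN if an of_set_vlan_pcp action follows, since the PCP of the
 * pushed tag cannot be inferred from a partial match. The VID check
 * behaves the same way with of_set_vlan_vid.
 */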
2006
2007 /**
2008  * Validate the set VLAN PCP.
2009  *
2010  * @param[in] action_flags
2011  *   Holds the actions detected until now.
2012  * @param[in] actions
2013  *   Pointer to the list of actions remaining in the flow rule.
2014  * @param[out] error
2015  *   Pointer to error structure.
2016  *
2017  * @return
2018  *   0 on success, a negative errno value otherwise and rte_errno is set.
2019  */
2020 static int
2021 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2022                                      const struct rte_flow_action actions[],
2023                                      struct rte_flow_error *error)
2024 {
2025         const struct rte_flow_action *action = actions;
2026         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2027
2028         if (conf->vlan_pcp > 7)
2029                 return rte_flow_error_set(error, EINVAL,
2030                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2031                                           "VLAN PCP value is too big");
2032         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2033                 return rte_flow_error_set(error, ENOTSUP,
2034                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2035                                           "set VLAN PCP action must follow "
2036                                           "the push VLAN action");
2037         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2038                 return rte_flow_error_set(error, ENOTSUP,
2039                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2040                                           "Multiple VLAN PCP modifications are "
2041                                           "not supported");
2042         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2043                 return rte_flow_error_set(error, EINVAL,
2044                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2045                                           "wrong action order, port_id should "
2046                                           "be after set VLAN PCP");
2047         return 0;
2048 }
2049
2050 /**
2051  * Validate the set VLAN VID.
2052  *
2053  * @param[in] item_flags
2054  *   Holds the items detected in this rule.
2055  * @param[in] action_flags
2056  *   Holds the actions detected until now.
2057  * @param[in] actions
2058  *   Pointer to the list of actions remaining in the flow rule.
2059  * @param[out] error
2060  *   Pointer to error structure.
2061  *
2062  * @return
2063  *   0 on success, a negative errno value otherwise and rte_errno is set.
2064  */
2065 static int
2066 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2067                                      uint64_t action_flags,
2068                                      const struct rte_flow_action actions[],
2069                                      struct rte_flow_error *error)
2070 {
2071         const struct rte_flow_action *action = actions;
2072         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2073
2074         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2075                 return rte_flow_error_set(error, EINVAL,
2076                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2077                                           "VLAN VID value is too big");
2078         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2079             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2080                 return rte_flow_error_set(error, ENOTSUP,
2081                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2082                                           "set VLAN VID action must follow push"
2083                                           " VLAN action or match on VLAN item");
2084         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2085                 return rte_flow_error_set(error, ENOTSUP,
2086                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2087                                           "Multiple VLAN VID modifications are "
2088                                           "not supported");
2089         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2090                 return rte_flow_error_set(error, EINVAL,
2091                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2092                                           "wrong action order, port_id should "
2093                                           "be after set VLAN VID");
2094         return 0;
2095 }
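
/*
 * Boundary example: conf->vlan_vid = RTE_BE16(4095) is rejected above
 * since 0xfff is the reserved VLAN ID; 0xffe (4094) is the largest
 * value accepted.
 */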
2096
2097 /**
2098  * Validate the FLAG action.
2099  *
2100  * @param[in] dev
2101  *   Pointer to the rte_eth_dev structure.
2102  * @param[in] action_flags
2103  *   Holds the actions detected until now.
2104  * @param[in] attr
2105  *   Pointer to flow attributes
2106  * @param[out] error
2107  *   Pointer to error structure.
2108  *
2109  * @return
2110  *   0 on success, a negative errno value otherwise and rte_errno is set.
2111  */
2112 static int
2113 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2114                              uint64_t action_flags,
2115                              const struct rte_flow_attr *attr,
2116                              struct rte_flow_error *error)
2117 {
2118         struct mlx5_priv *priv = dev->data->dev_private;
2119         struct mlx5_dev_config *config = &priv->config;
2120         int ret;
2121
2122         /* Fall back if no extended metadata register support. */
2123         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2124                 return mlx5_flow_validate_action_flag(action_flags, attr,
2125                                                       error);
2126         /* Extensive metadata mode requires registers. */
2127         if (!mlx5_flow_ext_mreg_supported(dev))
2128                 return rte_flow_error_set(error, ENOTSUP,
2129                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2130                                           "no metadata registers "
2131                                           "to support flag action");
2132         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2133                 return rte_flow_error_set(error, ENOTSUP,
2134                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2135                                           "extended metadata register"
2136                                           " isn't available");
2137         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2138         if (ret < 0)
2139                 return ret;
2140         MLX5_ASSERT(ret > 0);
2141         if (action_flags & MLX5_FLOW_ACTION_MARK)
2142                 return rte_flow_error_set(error, EINVAL,
2143                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2144                                           "can't mark and flag in same flow");
2145         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2146                 return rte_flow_error_set(error, EINVAL,
2147                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2148                                           "can't have 2 flag"
2149                                           " actions in same flow");
2150         return 0;
2151 }
2152
2153 /**
2154  * Validate MARK action.
2155  *
2156  * @param[in] dev
2157  *   Pointer to the rte_eth_dev structure.
2158  * @param[in] action
2159  *   Pointer to action.
2160  * @param[in] action_flags
2161  *   Holds the actions detected until now.
2162  * @param[in] attr
2163  *   Pointer to flow attributes
2164  * @param[out] error
2165  *   Pointer to error structure.
2166  *
2167  * @return
2168  *   0 on success, a negative errno value otherwise and rte_errno is set.
2169  */
2170 static int
2171 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2172                              const struct rte_flow_action *action,
2173                              uint64_t action_flags,
2174                              const struct rte_flow_attr *attr,
2175                              struct rte_flow_error *error)
2176 {
2177         struct mlx5_priv *priv = dev->data->dev_private;
2178         struct mlx5_dev_config *config = &priv->config;
2179         const struct rte_flow_action_mark *mark = action->conf;
2180         int ret;
2181
2182         /* Fall back if no extended metadata register support. */
2183         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2184                 return mlx5_flow_validate_action_mark(action, action_flags,
2185                                                       attr, error);
2186         /* Extensive metadata mode requires registers. */
2187         if (!mlx5_flow_ext_mreg_supported(dev))
2188                 return rte_flow_error_set(error, ENOTSUP,
2189                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2190                                           "no metadata registers "
2191                                           "to support mark action");
2192         if (!priv->sh->dv_mark_mask)
2193                 return rte_flow_error_set(error, ENOTSUP,
2194                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2195                                           "extended metadata register"
2196                                           " isn't available");
2197         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2198         if (ret < 0)
2199                 return ret;
2200         MLX5_ASSERT(ret > 0);
2201         if (!mark)
2202                 return rte_flow_error_set(error, EINVAL,
2203                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2204                                           "configuration cannot be null");
2205         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2206                 return rte_flow_error_set(error, EINVAL,
2207                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2208                                           &mark->id,
2209                                           "mark id exceeds the limit");
2210         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2211                 return rte_flow_error_set(error, EINVAL,
2212                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2213                                           "can't flag and mark in same flow");
2214         if (action_flags & MLX5_FLOW_ACTION_MARK)
2215                 return rte_flow_error_set(error, EINVAL,
2216                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2217                                           "can't have 2 mark actions in same"
2218                                           " flow");
2219         return 0;
2220 }
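
/*
 * Illustrative rule exercising this path (testpmd-style sketch, not
 * from this file; assumes extended metadata registers are available):
 *
 *   flow create 0 ingress pattern eth / end
 *        actions mark id 42 / queue index 0 / end
 *
 * Combining mark with flag, or using two mark actions, fails above.
 */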
2221
2222 /**
2223  * Validate SET_META action.
2224  *
2225  * @param[in] dev
2226  *   Pointer to the rte_eth_dev structure.
2227  * @param[in] action
2228  *   Pointer to the action structure.
2229  * @param[in] action_flags
2230  *   Holds the actions detected until now.
2231  * @param[in] attr
2232  *   Pointer to flow attributes
2233  * @param[out] error
2234  *   Pointer to error structure.
2235  *
2236  * @return
2237  *   0 on success, a negative errno value otherwise and rte_errno is set.
2238  */
2239 static int
2240 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2241                                  const struct rte_flow_action *action,
2242                                  uint64_t action_flags __rte_unused,
2243                                  const struct rte_flow_attr *attr,
2244                                  struct rte_flow_error *error)
2245 {
2246         const struct rte_flow_action_set_meta *conf;
2247         uint32_t nic_mask = UINT32_MAX;
2248         int reg;
2249
2250         if (!mlx5_flow_ext_mreg_supported(dev))
2251                 return rte_flow_error_set(error, ENOTSUP,
2252                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2253                                           "extended metadata register"
2254                                           " isn't supported");
2255         reg = flow_dv_get_metadata_reg(dev, attr, error);
2256         if (reg < 0)
2257                 return reg;
2258         if (reg != REG_A && reg != REG_B) {
2259                 struct mlx5_priv *priv = dev->data->dev_private;
2260
2261                 nic_mask = priv->sh->dv_meta_mask;
2262         }
2263         if (!(action->conf))
2264                 return rte_flow_error_set(error, EINVAL,
2265                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2266                                           "configuration cannot be null");
2267         conf = (const struct rte_flow_action_set_meta *)action->conf;
2268         if (!conf->mask)
2269                 return rte_flow_error_set(error, EINVAL,
2270                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2271                                           "zero mask doesn't have any effect");
2272         if (conf->mask & ~nic_mask)
2273                 return rte_flow_error_set(error, EINVAL,
2274                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2275                                           "metadata must be within reg C0");
2276         return 0;
2277 }
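
/*
 * Example: if flow_dv_get_metadata_reg() selects a REG_C register and
 * only the lower 16 bits are usable (priv->sh->dv_meta_mask == 0xffff,
 * a hypothetical value), a SET_META with conf->mask = 0xffff0000 is
 * rejected above for touching bits outside the usable register mask.
 */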
2278
2279 /**
2280  * Validate SET_TAG action.
2281  *
2282  * @param[in] dev
2283  *   Pointer to the rte_eth_dev structure.
2284  * @param[in] action
2285  *   Pointer to the action structure.
2286  * @param[in] action_flags
2287  *   Holds the actions detected until now.
2288  * @param[in] attr
2289  *   Pointer to flow attributes
2290  * @param[out] error
2291  *   Pointer to error structure.
2292  *
2293  * @return
2294  *   0 on success, a negative errno value otherwise and rte_errno is set.
2295  */
2296 static int
2297 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2298                                 const struct rte_flow_action *action,
2299                                 uint64_t action_flags,
2300                                 const struct rte_flow_attr *attr,
2301                                 struct rte_flow_error *error)
2302 {
2303         const struct rte_flow_action_set_tag *conf;
2304         const uint64_t terminal_action_flags =
2305                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2306                 MLX5_FLOW_ACTION_RSS;
2307         int ret;
2308
2309         if (!mlx5_flow_ext_mreg_supported(dev))
2310                 return rte_flow_error_set(error, ENOTSUP,
2311                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2312                                           "extended metadata register"
2313                                           " isn't supported");
2314         if (!(action->conf))
2315                 return rte_flow_error_set(error, EINVAL,
2316                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2317                                           "configuration cannot be null");
2318         conf = (const struct rte_flow_action_set_tag *)action->conf;
2319         if (!conf->mask)
2320                 return rte_flow_error_set(error, EINVAL,
2321                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2322                                           "zero mask doesn't have any effect");
2323         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2324         if (ret < 0)
2325                 return ret;
2326         if (!attr->transfer && attr->ingress &&
2327             (action_flags & terminal_action_flags))
2328                 return rte_flow_error_set(error, EINVAL,
2329                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2330                                           "set_tag has no effect"
2331                                           " with terminal actions");
2332         return 0;
2333 }
2334
2335 /**
2336  * Validate count action.
2337  *
2338  * @param[in] dev
2339  *   Pointer to rte_eth_dev structure.
2340  * @param[out] error
2341  *   Pointer to error structure.
2342  *
2343  * @return
2344  *   0 on success, a negative errno value otherwise and rte_errno is set.
2345  */
2346 static int
2347 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2348                               struct rte_flow_error *error)
2349 {
2350         struct mlx5_priv *priv = dev->data->dev_private;
2351
2352         if (!priv->config.devx)
2353                 goto notsup_err;
2354 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2355         return 0;
2356 #endif
2357 notsup_err:
2358         return rte_flow_error_set
2359                       (error, ENOTSUP,
2360                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2361                        NULL,
2362                        "count action not supported");
2363 }
2364
2365 /**
2366  * Validate the L2 encap action.
2367  *
2368  * @param[in] dev
2369  *   Pointer to the rte_eth_dev structure.
2370  * @param[in] action_flags
2371  *   Holds the actions detected until now.
2372  * @param[in] action
2373  *   Pointer to the action structure.
2374  * @param[in] attr
2375  *   Pointer to flow attributes.
2376  * @param[out] error
2377  *   Pointer to error structure.
2378  *
2379  * @return
2380  *   0 on success, a negative errno value otherwise and rte_errno is set.
2381  */
2382 static int
2383 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2384                                  uint64_t action_flags,
2385                                  const struct rte_flow_action *action,
2386                                  const struct rte_flow_attr *attr,
2387                                  struct rte_flow_error *error)
2388 {
2389         const struct mlx5_priv *priv = dev->data->dev_private;
2390
2391         if (!(action->conf))
2392                 return rte_flow_error_set(error, EINVAL,
2393                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2394                                           "configuration cannot be null");
2395         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2396                 return rte_flow_error_set(error, EINVAL,
2397                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2398                                           "can only have a single encap action "
2399                                           "in a flow");
2400         if (!attr->transfer && priv->representor)
2401                 return rte_flow_error_set(error, ENOTSUP,
2402                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2403                                           "encap action for VF representor "
2404                                           "not supported on NIC table");
2405         return 0;
2406 }
2407
2408 /**
2409  * Validate a decap action.
2410  *
2411  * @param[in] dev
2412  *   Pointer to the rte_eth_dev structure.
2413  * @param[in] action_flags
2414  *   Holds the actions detected until now.
2415  * @param[in] attr
2416  *   Pointer to flow attributes
2417  * @param[out] error
2418  *   Pointer to error structure.
2419  *
2420  * @return
2421  *   0 on success, a negative errno value otherwise and rte_errno is set.
2422  */
2423 static int
2424 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2425                               uint64_t action_flags,
2426                               const struct rte_flow_attr *attr,
2427                               struct rte_flow_error *error)
2428 {
2429         const struct mlx5_priv *priv = dev->data->dev_private;
2430
2431         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2432                 return rte_flow_error_set(error, ENOTSUP,
2433                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2434                                           action_flags &
2435                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2436                                           "have a single decap action" : "decap "
2437                                           "after encap is not supported");
2438         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2439                 return rte_flow_error_set(error, EINVAL,
2440                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2441                                           "can't have decap action after"
2442                                           " modify action");
2443         if (attr->egress)
2444                 return rte_flow_error_set(error, ENOTSUP,
2445                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2446                                           NULL,
2447                                           "decap action not supported for "
2448                                           "egress");
2449         if (!attr->transfer && priv->representor)
2450                 return rte_flow_error_set(error, ENOTSUP,
2451                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2452                                           "decap action for VF representor "
2453                                           "not supported on NIC table");
2454         return 0;
2455 }
2456
2457 static const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2458
2459 /**
2460  * Validate the raw encap and decap actions.
2461  *
2462  * @param[in] dev
2463  *   Pointer to the rte_eth_dev structure.
2464  * @param[in] decap
2465  *   Pointer to the decap action.
2466  * @param[in] encap
2467  *   Pointer to the encap action.
2468  * @param[in] attr
2469  *   Pointer to flow attributes
2470  * @param[in/out] action_flags
2471  *   Holds the actions detected until now.
2472  * @param[out] actions_n
2473  *   pointer to the number of actions counter.
2474  * @param[out] error
2475  *   Pointer to error structure.
2476  *
2477  * @return
2478  *   0 on success, a negative errno value otherwise and rte_errno is set.
2479  */
2480 static int
2481 flow_dv_validate_action_raw_encap_decap
2482         (struct rte_eth_dev *dev,
2483          const struct rte_flow_action_raw_decap *decap,
2484          const struct rte_flow_action_raw_encap *encap,
2485          const struct rte_flow_attr *attr, uint64_t *action_flags,
2486          int *actions_n, struct rte_flow_error *error)
2487 {
2488         const struct mlx5_priv *priv = dev->data->dev_private;
2489         int ret;
2490
2491         if (encap && (!encap->size || !encap->data))
2492                 return rte_flow_error_set(error, EINVAL,
2493                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2494                                           "raw encap data cannot be empty");
2495         if (decap && encap) {
2496                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2497                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2498                         /* L3 encap. */
2499                         decap = NULL;
2500                 else if (encap->size <=
2501                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2502                            decap->size >
2503                            MLX5_ENCAPSULATION_DECISION_SIZE)
2504                         /* L3 decap. */
2505                         encap = NULL;
2506                 else if (encap->size >
2507                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2508                            decap->size >
2509                            MLX5_ENCAPSULATION_DECISION_SIZE)
2510                         /* 2 L2 actions: encap and decap. */
2511                         ;
2512                 else
2513                         return rte_flow_error_set(error,
2514                                 ENOTSUP,
2515                                 RTE_FLOW_ERROR_TYPE_ACTION,
2516                                 NULL, "unsupported combination: both "
2517                                 "raw decap and raw encap sizes are "
2518                                 "too small");
2519         }
2520         if (decap) {
2521                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2522                                                     error);
2523                 if (ret < 0)
2524                         return ret;
2525                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2526                 ++(*actions_n);
2527         }
2528         if (encap) {
2529                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2530                         return rte_flow_error_set(error, ENOTSUP,
2531                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2532                                                   NULL,
2533                                                   "raw encap size is too small");
2534                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2535                         return rte_flow_error_set(error, EINVAL,
2536                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2537                                                   NULL,
2538                                                   "more than one encap action");
2539                 if (!attr->transfer && priv->representor)
2540                         return rte_flow_error_set
2541                                         (error, ENOTSUP,
2542                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2543                                          "encap action for VF representor "
2544                                          "not supported on NIC table");
2545                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2546                 ++(*actions_n);
2547         }
2548         return 0;
2549 }
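
/*
 * Decision summary for a combined raw decap + raw encap pair, with
 * sizes compared against MLX5_ENCAPSULATION_DECISION_SIZE (the L2
 * header boundary):
 *
 *   decap <= boundary, encap >  boundary  ->  treated as L3 encap
 *   encap <= boundary, decap >  boundary  ->  treated as L3 decap
 *   both  >  boundary                     ->  two separate L2 actions
 *   both  <= boundary                     ->  rejected (ENOTSUP)
 */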
2550
2551 /**
2552  * Find existing encap/decap resource or create and register a new one.
2553  *
2554  * @param[in, out] dev
2555  *   Pointer to rte_eth_dev structure.
2556  * @param[in, out] resource
2557  *   Pointer to encap/decap resource.
2558  * @param[in, out] dev_flow
2559  *   Pointer to the dev_flow.
2560  * @param[out] error
2561  *   Pointer to error structure.
2562  *
2563  * @return
2564  *   0 on success, a negative errno value otherwise and rte_errno is set.
2565  */
2566 static int
2567 flow_dv_encap_decap_resource_register
2568                         (struct rte_eth_dev *dev,
2569                          struct mlx5_flow_dv_encap_decap_resource *resource,
2570                          struct mlx5_flow *dev_flow,
2571                          struct rte_flow_error *error)
2572 {
2573         struct mlx5_priv *priv = dev->data->dev_private;
2574         struct mlx5_dev_ctx_shared *sh = priv->sh;
2575         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2576         struct mlx5dv_dr_domain *domain;
2577         uint32_t idx = 0;
2578         int ret;
2579
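        /* Group 0 is the root table, mark the resource as root level. */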
2580         resource->flags = dev_flow->dv.group ? 0 : 1;
2581         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2582                 domain = sh->fdb_domain;
2583         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2584                 domain = sh->rx_domain;
2585         else
2586                 domain = sh->tx_domain;
2587         /* Lookup a matching resource from cache. */
2588         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
2589                       cache_resource, next) {
2590                 if (resource->reformat_type == cache_resource->reformat_type &&
2591                     resource->ft_type == cache_resource->ft_type &&
2592                     resource->flags == cache_resource->flags &&
2593                     resource->size == cache_resource->size &&
2594                     !memcmp((const void *)resource->buf,
2595                             (const void *)cache_resource->buf,
2596                             resource->size)) {
2597                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2598                                 (void *)cache_resource,
2599                                 rte_atomic32_read(&cache_resource->refcnt));
2600                         rte_atomic32_inc(&cache_resource->refcnt);
2601                         dev_flow->handle->dvh.rix_encap_decap = idx;
2602                         dev_flow->dv.encap_decap = cache_resource;
2603                         return 0;
2604                 }
2605         }
2606         /* Register new encap/decap resource. */
2607         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2608                                        &dev_flow->handle->dvh.rix_encap_decap);
2609         if (!cache_resource)
2610                 return rte_flow_error_set(error, ENOMEM,
2611                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2612                                           "cannot allocate resource memory");
2613         *cache_resource = *resource;
2614         ret = mlx5_flow_os_create_flow_action_packet_reformat
2615                                         (sh->ctx, domain, cache_resource,
2616                                          &cache_resource->action);
2617         if (ret) {
2618                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
                                     dev_flow->handle->dvh.rix_encap_decap);
2619                 return rte_flow_error_set(error, ENOMEM,
2620                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2621                                           NULL, "cannot create action");
2622         }
2623         rte_atomic32_init(&cache_resource->refcnt);
2624         rte_atomic32_inc(&cache_resource->refcnt);
2625         ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
2626                      dev_flow->handle->dvh.rix_encap_decap, cache_resource,
2627                      next);
2628         dev_flow->dv.encap_decap = cache_resource;
2629         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2630                 (void *)cache_resource,
2631                 rte_atomic32_read(&cache_resource->refcnt));
2632         return 0;
2633 }
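
/*
 * Caller sketch (illustrative): translation code fills a resource on the
 * stack and registers it; the function above either reuses a cached entry
 * (bumping its refcount) or creates a new packet reformat action. The
 * "header" buffer and "header_len" below are hypothetical:
 *
 *     struct mlx5_flow_dv_encap_decap_resource res = {
 *             .reformat_type =
 *                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
 *             .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
 *             .size = header_len,
 *     };
 *     memcpy(res.buf, header, header_len);
 *     if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
 *             return -rte_errno;
 */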
2634
2635 /**
2636  * Find existing table jump resource or create and register a new one.
2637  *
2638  * @param[in, out] dev
2639  *   Pointer to rte_eth_dev structure.
2640  * @param[in, out] tbl
2641  *   Pointer to flow table resource.
2642  * @param[in, out] dev_flow
2643  *   Pointer to the dev_flow.
2644  * @param[out] error
2645  *   Pointer to error structure.
2646  *
2647  * @return
2648  *   0 on success, otherwise -errno and rte_errno is set.
2649  */
2650 static int
2651 flow_dv_jump_tbl_resource_register
2652                         (struct rte_eth_dev *dev __rte_unused,
2653                          struct mlx5_flow_tbl_resource *tbl,
2654                          struct mlx5_flow *dev_flow,
2655                          struct rte_flow_error *error)
2656 {
2657         struct mlx5_flow_tbl_data_entry *tbl_data =
2658                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2659         int cnt, ret;
2660
2661         MLX5_ASSERT(tbl);
2662         cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
2663         if (!cnt) {
2664                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
2665                                 (tbl->obj, &tbl_data->jump.action);
2666                 if (ret)
2667                         return rte_flow_error_set(error, ENOMEM,
2668                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2669                                         NULL, "cannot create jump action");
2670                 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
2671                         (void *)&tbl_data->jump, cnt);
2672         } else {
2673                 /* Jump action exists, drop the redundant table reference. */
2674                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
2675                 MLX5_ASSERT(tbl_data->jump.action);
2676                 DRV_LOG(DEBUG, "existing jump table resource %p: refcnt %d++",
2677                         (void *)&tbl_data->jump, cnt);
2678         }
2679         rte_atomic32_inc(&tbl_data->jump.refcnt);
2680         dev_flow->handle->rix_jump = tbl_data->idx;
2681         dev_flow->dv.jump = &tbl_data->jump;
2682         return 0;
2683 }
2684
2685 /**
2686  * Find existing default miss resource or create and register a new one.
2687  *
2688  * @param[in, out] dev
2689  *   Pointer to rte_eth_dev structure.
2690  * @param[out] error
2691  *   Pointer to error structure.
2692  *
2693  * @return
2694  *   0 on success, otherwise -errno and rte_errno is set.
2695  */
2696 static int
2697 flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
2698                 struct rte_flow_error *error)
2699 {
2700         struct mlx5_priv *priv = dev->data->dev_private;
2701         struct mlx5_dev_ctx_shared *sh = priv->sh;
2702         struct mlx5_flow_default_miss_resource *cache_resource =
2703                         &sh->default_miss;
2704         int cnt = rte_atomic32_read(&cache_resource->refcnt);
2705
2706         if (!cnt) {
2707                 MLX5_ASSERT(!cache_resource->action);
2708                 cache_resource->action =
2709                         mlx5_glue->dr_create_flow_action_default_miss();
2710                 if (!cache_resource->action)
2711                         return rte_flow_error_set(error, ENOMEM,
2712                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2713                                         "cannot create default miss action");
2714                 DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
2715                                 (void *)cache_resource->action, cnt);
2716         }
2717         rte_atomic32_inc(&cache_resource->refcnt);
2718         return 0;
2719 }
2720
2721 /**
2722  * Find existing table port ID resource or create and register a new one.
2723  *
2724  * @param[in, out] dev
2725  *   Pointer to rte_eth_dev structure.
2726  * @param[in, out] resource
2727  *   Pointer to port ID action resource.
2728  * @param[in, out] dev_flow
2729  *   Pointer to the dev_flow.
2730  * @param[out] error
2731  *   Pointer to error structure.
2732  *
2733  * @return
2734  *   0 on success, otherwise -errno and rte_errno is set.
2735  */
2736 static int
2737 flow_dv_port_id_action_resource_register
2738                         (struct rte_eth_dev *dev,
2739                          struct mlx5_flow_dv_port_id_action_resource *resource,
2740                          struct mlx5_flow *dev_flow,
2741                          struct rte_flow_error *error)
2742 {
2743         struct mlx5_priv *priv = dev->data->dev_private;
2744         struct mlx5_dev_ctx_shared *sh = priv->sh;
2745         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
2746         uint32_t idx = 0;
2747         int ret;
2748
2749         /* Lookup a matching resource from cache. */
2750         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
2751                       idx, cache_resource, next) {
2752                 if (resource->port_id == cache_resource->port_id) {
2753                         DRV_LOG(DEBUG, "port id action resource %p: "
2754                                 "refcnt %d++",
2755                                 (void *)cache_resource,
2756                                 rte_atomic32_read(&cache_resource->refcnt));
2757                         rte_atomic32_inc(&cache_resource->refcnt);
2758                         dev_flow->handle->rix_port_id_action = idx;
2759                         dev_flow->dv.port_id_action = cache_resource;
2760                         return 0;
2761                 }
2762         }
2763         /* Register new port id action resource. */
2764         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
2765                                        &dev_flow->handle->rix_port_id_action);
2766         if (!cache_resource)
2767                 return rte_flow_error_set(error, ENOMEM,
2768                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2769                                           "cannot allocate resource memory");
2770         *cache_resource = *resource;
2771         ret = mlx5_flow_os_create_flow_action_dest_port
2772                                 (priv->sh->fdb_domain, resource->port_id,
2773                                  &cache_resource->action);
2774         if (ret) {
2775                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID],
                                     dev_flow->handle->rix_port_id_action);
2776                 return rte_flow_error_set(error, ENOMEM,
2777                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2778                                           NULL, "cannot create action");
2779         }
2780         rte_atomic32_init(&cache_resource->refcnt);
2781         rte_atomic32_inc(&cache_resource->refcnt);
2782         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
2783                      dev_flow->handle->rix_port_id_action, cache_resource,
2784                      next);
2785         dev_flow->dv.port_id_action = cache_resource;
2786         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
2787                 (void *)cache_resource,
2788                 rte_atomic32_read(&cache_resource->refcnt));
2789         return 0;
2790 }
2791
2792 /**
2793  * Find existing push vlan resource or create and register a new one.
2794  *
2795  * @param[in, out] dev
2796  *   Pointer to rte_eth_dev structure.
2797  * @param[in, out] resource
2798  *   Pointer to push VLAN action resource.
2799  * @param[in, out] dev_flow
2800  *   Pointer to the dev_flow.
2801  * @param[out] error
2802  *   Pointer to error structure.
2803  *
2804  * @return
2805  *   0 on success, otherwise -errno and rte_errno is set.
2806  */
2807 static int
2808 flow_dv_push_vlan_action_resource_register
2809                        (struct rte_eth_dev *dev,
2810                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
2811                         struct mlx5_flow *dev_flow,
2812                         struct rte_flow_error *error)
2813 {
2814         struct mlx5_priv *priv = dev->data->dev_private;
2815         struct mlx5_dev_ctx_shared *sh = priv->sh;
2816         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
2817         struct mlx5dv_dr_domain *domain;
2818         uint32_t idx = 0;
2819         int ret;
2820
2821         /* Lookup a matching resource from cache. */
2822         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2823                       sh->push_vlan_action_list, idx, cache_resource, next) {
2824                 if (resource->vlan_tag == cache_resource->vlan_tag &&
2825                     resource->ft_type == cache_resource->ft_type) {
2826                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
2827                                 "refcnt %d++",
2828                                 (void *)cache_resource,
2829                                 rte_atomic32_read(&cache_resource->refcnt));
2830                         rte_atomic32_inc(&cache_resource->refcnt);
2831                         dev_flow->handle->dvh.rix_push_vlan = idx;
2832                         dev_flow->dv.push_vlan_res = cache_resource;
2833                         return 0;
2834                 }
2835         }
2836         /* Register new push_vlan action resource. */
2837         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2838                                        &dev_flow->handle->dvh.rix_push_vlan);
2839         if (!cache_resource)
2840                 return rte_flow_error_set(error, ENOMEM,
2841                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2842                                           "cannot allocate resource memory");
2843         *cache_resource = *resource;
2844         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2845                 domain = sh->fdb_domain;
2846         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2847                 domain = sh->rx_domain;
2848         else
2849                 domain = sh->tx_domain;
2850         ret = mlx5_flow_os_create_flow_action_push_vlan
2851                                         (domain, resource->vlan_tag,
2852                                          &cache_resource->action);
2853         if (ret) {
2854                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
                                     dev_flow->handle->dvh.rix_push_vlan);
2855                 return rte_flow_error_set(error, ENOMEM,
2856                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2857                                           NULL, "cannot create action");
2858         }
2859         rte_atomic32_init(&cache_resource->refcnt);
2860         rte_atomic32_inc(&cache_resource->refcnt);
2861         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2862                      &sh->push_vlan_action_list,
2863                      dev_flow->handle->dvh.rix_push_vlan,
2864                      cache_resource, next);
2865         dev_flow->dv.push_vlan_res = cache_resource;
2866         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
2867                 (void *)cache_resource,
2868                 rte_atomic32_read(&cache_resource->refcnt));
2869         return 0;
2870 }

2871 /**
2872  * Get the size of a specific rte_flow_item_type.
2873  *
2874  * @param[in] item_type
2875  *   Tested rte_flow_item_type.
2876  *
2877  * @return
2878  *   Size of the item structure in bytes, 0 if void or irrelevant.
2879  */
2880 static size_t
2881 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
2882 {
2883         size_t retval;
2884
2885         switch (item_type) {
2886         case RTE_FLOW_ITEM_TYPE_ETH:
2887                 retval = sizeof(struct rte_flow_item_eth);
2888                 break;
2889         case RTE_FLOW_ITEM_TYPE_VLAN:
2890                 retval = sizeof(struct rte_flow_item_vlan);
2891                 break;
2892         case RTE_FLOW_ITEM_TYPE_IPV4:
2893                 retval = sizeof(struct rte_flow_item_ipv4);
2894                 break;
2895         case RTE_FLOW_ITEM_TYPE_IPV6:
2896                 retval = sizeof(struct rte_flow_item_ipv6);
2897                 break;
2898         case RTE_FLOW_ITEM_TYPE_UDP:
2899                 retval = sizeof(struct rte_flow_item_udp);
2900                 break;
2901         case RTE_FLOW_ITEM_TYPE_TCP:
2902                 retval = sizeof(struct rte_flow_item_tcp);
2903                 break;
2904         case RTE_FLOW_ITEM_TYPE_VXLAN:
2905                 retval = sizeof(struct rte_flow_item_vxlan);
2906                 break;
2907         case RTE_FLOW_ITEM_TYPE_GRE:
2908                 retval = sizeof(struct rte_flow_item_gre);
2909                 break;
2910         case RTE_FLOW_ITEM_TYPE_NVGRE:
2911                 retval = sizeof(struct rte_flow_item_nvgre);
2912                 break;
2913         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2914                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
2915                 break;
2916         case RTE_FLOW_ITEM_TYPE_MPLS:
2917                 retval = sizeof(struct rte_flow_item_mpls);
2918                 break;
2919         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
2920         default:
2921                 retval = 0;
2922                 break;
2923         }
2924         return retval;
2925 }
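
/*
 * The item structures above mirror the wire headers one-to-one, so their
 * sizes can simply be summed to get the raw header length. For example,
 * a VXLAN encapsulation built from eth/ipv4/udp/vxlan items needs
 * 14 + 20 + 8 + 8 = 50 bytes (illustrative sketch):
 *
 *     size_t len = flow_dv_get_item_len(RTE_FLOW_ITEM_TYPE_ETH) +
 *                  flow_dv_get_item_len(RTE_FLOW_ITEM_TYPE_IPV4) +
 *                  flow_dv_get_item_len(RTE_FLOW_ITEM_TYPE_UDP) +
 *                  flow_dv_get_item_len(RTE_FLOW_ITEM_TYPE_VXLAN);
 *
 * which is the value accumulated in temp_size by the converter below.
 */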
2926
2927 #define MLX5_ENCAP_IPV4_VERSION         0x40
2928 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
2929 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
2930 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
2931 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
2932 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
2933 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
2934
2935 /**
2936  * Convert the encap action data from a list of rte_flow_item to a raw buffer
2937  *
2938  * @param[in] items
2939  *   Pointer to rte_flow_item objects list.
2940  * @param[out] buf
2941  *   Pointer to the output buffer.
2942  * @param[out] size
2943  *   Pointer to the output buffer size.
2944  * @param[out] error
2945  *   Pointer to the error structure.
2946  *
2947  * @return
2948  *   0 on success, a negative errno value otherwise and rte_errno is set.
2949  */
2950 static int
2951 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
2952                            size_t *size, struct rte_flow_error *error)
2953 {
2954         struct rte_ether_hdr *eth = NULL;
2955         struct rte_vlan_hdr *vlan = NULL;
2956         struct rte_ipv4_hdr *ipv4 = NULL;
2957         struct rte_ipv6_hdr *ipv6 = NULL;
2958         struct rte_udp_hdr *udp = NULL;
2959         struct rte_vxlan_hdr *vxlan = NULL;
2960         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
2961         struct rte_gre_hdr *gre = NULL;
2962         size_t len;
2963         size_t temp_size = 0;
2964
2965         if (!items)
2966                 return rte_flow_error_set(error, EINVAL,
2967                                           RTE_FLOW_ERROR_TYPE_ACTION,
2968                                           NULL, "invalid empty data");
2969         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2970                 len = flow_dv_get_item_len(items->type);
2971                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
2972                         return rte_flow_error_set(error, EINVAL,
2973                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2974                                                   (void *)items->type,
2975                                                   "items total size is too big"
2976                                                   " for encap action");
2977                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
2978                 switch (items->type) {
2979                 case RTE_FLOW_ITEM_TYPE_ETH:
2980                         eth = (struct rte_ether_hdr *)&buf[temp_size];
2981                         break;
2982                 case RTE_FLOW_ITEM_TYPE_VLAN:
2983                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
2984                         if (!eth)
2985                                 return rte_flow_error_set(error, EINVAL,
2986                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2987                                                 (void *)items->type,
2988                                                 "eth header not found");
2989                         if (!eth->ether_type)
2990                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2991                         break;
2992                 case RTE_FLOW_ITEM_TYPE_IPV4:
2993                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
2994                         if (!vlan && !eth)
2995                                 return rte_flow_error_set(error, EINVAL,
2996                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2997                                                 (void *)items->type,
2998                                                 "neither eth nor vlan"
2999                                                 " header found");
3000                         if (vlan && !vlan->eth_proto)
3001                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3002                         else if (eth && !eth->ether_type)
3003                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3004                         if (!ipv4->version_ihl)
3005                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3006                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3007                         if (!ipv4->time_to_live)
3008                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3009                         break;
3010                 case RTE_FLOW_ITEM_TYPE_IPV6:
3011                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3012                         if (!vlan && !eth)
3013                                 return rte_flow_error_set(error, EINVAL,
3014                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3015                                                 (void *)items->type,
3016                                                 "neither eth nor vlan"
3017                                                 " header found");
3018                         if (vlan && !vlan->eth_proto)
3019                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3020                         else if (eth && !eth->ether_type)
3021                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3022                         if (!ipv6->vtc_flow)
3023                                 ipv6->vtc_flow =
3024                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3025                         if (!ipv6->hop_limits)
3026                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3027                         break;
3028                 case RTE_FLOW_ITEM_TYPE_UDP:
3029                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3030                         if (!ipv4 && !ipv6)
3031                                 return rte_flow_error_set(error, EINVAL,
3032                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3033                                                 (void *)items->type,
3034                                                 "ip header not found");
3035                         if (ipv4 && !ipv4->next_proto_id)
3036                                 ipv4->next_proto_id = IPPROTO_UDP;
3037                         else if (ipv6 && !ipv6->proto)
3038                                 ipv6->proto = IPPROTO_UDP;
3039                         break;
3040                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3041                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3042                         if (!udp)
3043                                 return rte_flow_error_set(error, EINVAL,
3044                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3045                                                 (void *)items->type,
3046                                                 "udp header not found");
3047                         if (!udp->dst_port)
3048                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3049                         if (!vxlan->vx_flags)
3050                                 vxlan->vx_flags =
3051                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3052                         break;
3053                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3054                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3055                         if (!udp)
3056                                 return rte_flow_error_set(error, EINVAL,
3057                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3058                                                 (void *)items->type,
3059                                                 "udp header not found");
3060                         if (!vxlan_gpe->proto)
3061                                 return rte_flow_error_set(error, EINVAL,
3062                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3063                                                 (void *)items->type,
3064                                                 "next protocol not found");
3065                         if (!udp->dst_port)
3066                                 udp->dst_port =
3067                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3068                         if (!vxlan_gpe->vx_flags)
3069                                 vxlan_gpe->vx_flags =
3070                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3071                         break;
3072                 case RTE_FLOW_ITEM_TYPE_GRE:
3073                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3074                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3075                         if (!gre->proto)
3076                                 return rte_flow_error_set(error, EINVAL,
3077                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3078                                                 (void *)items->type,
3079                                                 "next protocol not found");
3080                         if (!ipv4 && !ipv6)
3081                                 return rte_flow_error_set(error, EINVAL,
3082                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3083                                                 (void *)items->type,
3084                                                 "ip header not found");
3085                         if (ipv4 && !ipv4->next_proto_id)
3086                                 ipv4->next_proto_id = IPPROTO_GRE;
3087                         else if (ipv6 && !ipv6->proto)
3088                                 ipv6->proto = IPPROTO_GRE;
3089                         break;
3090                 case RTE_FLOW_ITEM_TYPE_VOID:
3091                         break;
3092                 default:
3093                         return rte_flow_error_set(error, EINVAL,
3094                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3095                                                   (void *)items->type,
3096                                                   "unsupported item type");
3097                         break;
3098                 }
3099                 temp_size += len;
3100         }
3101         *size = temp_size;
3102         return 0;
3103 }
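
/*
 * Application-side sketch (illustrative): a VXLAN encapsulation definition
 * as consumed by the converter above. Fields left zero in the specs are
 * filled with the defaults handled above (EtherType, IPv4 version/TTL,
 * UDP destination port, VXLAN flags); the application is expected to set
 * the MAC/IP addresses and the VNI:
 *
 *     struct rte_flow_item_eth eth = { 0 };
 *     struct rte_flow_item_ipv4 ipv4 = { 0 };
 *     struct rte_flow_item_udp udp = { 0 };
 *     struct rte_flow_item_vxlan vxlan = { 0 };
 *     struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *     size_t size;
 *
 *     if (flow_dv_convert_encap_data(items, buf, &size, error))
 *             return -rte_errno;
 */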
3104
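/**
 * Zero the UDP checksum of the outer UDP header when the encapsulation
 * header is built over IPv6.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation header buffer.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */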
3105 static int
3106 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3107 {
3108         struct rte_ether_hdr *eth = NULL;
3109         struct rte_vlan_hdr *vlan = NULL;
3110         struct rte_ipv6_hdr *ipv6 = NULL;
3111         struct rte_udp_hdr *udp = NULL;
3112         char *next_hdr;
3113         uint16_t proto;
3114
3115         eth = (struct rte_ether_hdr *)data;
3116         next_hdr = (char *)(eth + 1);
3117         proto = rte_be_to_cpu_16(eth->ether_type);
3118
3119         /* VLAN skipping */
3120         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3121                 vlan = (struct rte_vlan_hdr *)next_hdr;
3122                 proto = rte_be_to_cpu_16(vlan->eth_proto);
3123                 next_hdr += sizeof(struct rte_vlan_hdr);
3124         }
3125
3126         /* HW calculates the IPv4 checksum, no need to proceed. */
3127         if (proto == RTE_ETHER_TYPE_IPV4)
3128                 return 0;
3129
3130         /* Non-IPv4/IPv6 header, not supported. */
3131         if (proto != RTE_ETHER_TYPE_IPV6) {
3132                 return rte_flow_error_set(error, ENOTSUP,
3133                                           RTE_FLOW_ERROR_TYPE_ACTION,
3134                                           NULL, "Cannot offload non IPv4/IPv6");
3135         }
3136
3137         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3138
3139         /* Ignore non-UDP packets. */
3140         if (ipv6->proto != IPPROTO_UDP)
3141                 return 0;
3142
3143         udp = (struct rte_udp_hdr *)(ipv6 + 1);
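        /*
         * A zero UDP checksum is permitted for tunneled packets over IPv6
         * (RFC 6935), so clear it instead of computing it for the new
         * outer header.
         */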
3144         udp->dgram_cksum = 0;
3145
3146         return 0;
3147 }
3148
3149 /**
3150  * Convert L2 encap action to DV specification.
3151  *
3152  * @param[in] dev
3153  *   Pointer to rte_eth_dev structure.
3154  * @param[in] action
3155  *   Pointer to action structure.
3156  * @param[in, out] dev_flow
3157  *   Pointer to the mlx5_flow.
3158  * @param[in] transfer
3159  *   Mark if the flow is E-Switch flow.
3160  * @param[out] error
3161  *   Pointer to the error structure.
3162  *
3163  * @return
3164  *   0 on success, a negative errno value otherwise and rte_errno is set.
3165  */
3166 static int
3167 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3168                                const struct rte_flow_action *action,
3169                                struct mlx5_flow *dev_flow,
3170                                uint8_t transfer,
3171                                struct rte_flow_error *error)
3172 {
3173         const struct rte_flow_item *encap_data;
3174         const struct rte_flow_action_raw_encap *raw_encap_data;
3175         struct mlx5_flow_dv_encap_decap_resource res = {
3176                 .reformat_type =
3177                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3178                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3179                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3180         };
3181
3182         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3183                 raw_encap_data =
3184                         (const struct rte_flow_action_raw_encap *)action->conf;
3185                 res.size = raw_encap_data->size;
3186                 memcpy(res.buf, raw_encap_data->data, res.size);
3187         } else {
3188                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3189                         encap_data =
3190                                 ((const struct rte_flow_action_vxlan_encap *)
3191                                                 action->conf)->definition;
3192                 else
3193                         encap_data =
3194                                 ((const struct rte_flow_action_nvgre_encap *)
3195                                                 action->conf)->definition;
3196                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3197                                                &res.size, error))
3198                         return -rte_errno;
3199         }
3200         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3201                 return -rte_errno;
3202         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3203                 return rte_flow_error_set(error, EINVAL,
3204                                           RTE_FLOW_ERROR_TYPE_ACTION,
3205                                           NULL, "can't create L2 encap action");
3206         return 0;
3207 }
3208
3209 /**
3210  * Convert L2 decap action to DV specification.
3211  *
3212  * @param[in] dev
3213  *   Pointer to rte_eth_dev structure.
3214  * @param[in, out] dev_flow
3215  *   Pointer to the mlx5_flow.
3216  * @param[in] transfer
3217  *   Mark if the flow is E-Switch flow.
3218  * @param[out] error
3219  *   Pointer to the error structure.
3220  *
3221  * @return
3222  *   0 on success, a negative errno value otherwise and rte_errno is set.
3223  */
3224 static int
3225 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3226                                struct mlx5_flow *dev_flow,
3227                                uint8_t transfer,
3228                                struct rte_flow_error *error)
3229 {
3230         struct mlx5_flow_dv_encap_decap_resource res = {
3231                 .size = 0,
3232                 .reformat_type =
3233                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3234                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3235                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3236         };
3237
3238         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3239                 return rte_flow_error_set(error, EINVAL,
3240                                           RTE_FLOW_ERROR_TYPE_ACTION,
3241                                           NULL, "can't create L2 decap action");
3242         return 0;
3243 }
3244
3245 /**
3246  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3247  *
3248  * @param[in] dev
3249  *   Pointer to rte_eth_dev structure.
3250  * @param[in] action
3251  *   Pointer to action structure.
3252  * @param[in, out] dev_flow
3253  *   Pointer to the mlx5_flow.
3254  * @param[in] attr
3255  *   Pointer to the flow attributes.
3256  * @param[out] error
3257  *   Pointer to the error structure.
3258  *
3259  * @return
3260  *   0 on success, a negative errno value otherwise and rte_errno is set.
3261  */
3262 static int
3263 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3264                                 const struct rte_flow_action *action,
3265                                 struct mlx5_flow *dev_flow,
3266                                 const struct rte_flow_attr *attr,
3267                                 struct rte_flow_error *error)
3268 {
3269         const struct rte_flow_action_raw_encap *encap_data;
3270         struct mlx5_flow_dv_encap_decap_resource res;
3271
3272         memset(&res, 0, sizeof(res));
3273         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3274         res.size = encap_data->size;
3275         memcpy(res.buf, encap_data->data, res.size);
3276         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3277                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3278                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3279         if (attr->transfer)
3280                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3281         else
3282                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3283                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3284         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3285                 return rte_flow_error_set(error, EINVAL,
3286                                           RTE_FLOW_ERROR_TYPE_ACTION,
3287                                           NULL, "can't create encap action");
3288         return 0;
3289 }
3290
3291 /**
3292  * Create action push VLAN.
3293  *
3294  * @param[in] dev
3295  *   Pointer to rte_eth_dev structure.
3296  * @param[in] attr
3297  *   Pointer to the flow attributes.
3298  * @param[in] vlan
3299  *   Pointer to the vlan to push to the Ethernet header.
3300  * @param[in, out] dev_flow
3301  *   Pointer to the mlx5_flow.
3302  * @param[out] error
3303  *   Pointer to the error structure.
3304  *
3305  * @return
3306  *   0 on success, a negative errno value otherwise and rte_errno is set.
3307  */
3308 static int
3309 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3310                                 const struct rte_flow_attr *attr,
3311                                 const struct rte_vlan_hdr *vlan,
3312                                 struct mlx5_flow *dev_flow,
3313                                 struct rte_flow_error *error)
3314 {
3315         struct mlx5_flow_dv_push_vlan_action_resource res;
3316
3317         memset(&res, 0, sizeof(res));
3318         res.vlan_tag =
3319                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3320                                  vlan->vlan_tci);
3321         if (attr->transfer)
3322                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3323         else
3324                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3325                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3326         return flow_dv_push_vlan_action_resource_register
3327                                             (dev, &res, dev_flow, error);
3328 }
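
/*
 * Worked example (illustrative): the 32-bit tag passed to the push-VLAN
 * action packs the TPID and the TCI. For TPID 0x8100, PCP 3 and VID 100:
 *
 *     eth_proto << 16 | vlan_tci = 0x8100 << 16 | (3 << 13 | 100)
 *                                = 0x81006064
 *
 * which rte_cpu_to_be_32() above stores in network byte order.
 */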
3329
3330 /**
3331  * Validate the modify-header actions.
3332  *
3333  * @param[in] action_flags
3334  *   Holds the actions detected until now.
3335  * @param[in] action
3336  *   Pointer to the modify action.
3337  * @param[out] error
3338  *   Pointer to error structure.
3339  *
3340  * @return
3341  *   0 on success, a negative errno value otherwise and rte_errno is set.
3342  */
3343 static int
3344 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3345                                    const struct rte_flow_action *action,
3346                                    struct rte_flow_error *error)
3347 {
3348         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3349                 return rte_flow_error_set(error, EINVAL,
3350                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3351                                           NULL, "action configuration not set");
3352         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3353                 return rte_flow_error_set(error, EINVAL,
3354                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3355                                           "can't have encap action before"
3356                                           " modify action");
3357         return 0;
3358 }
3359
3360 /**
3361  * Validate the modify-header MAC address actions.
3362  *
3363  * @param[in] action_flags
3364  *   Holds the actions detected until now.
3365  * @param[in] action
3366  *   Pointer to the modify action.
3367  * @param[in] item_flags
3368  *   Holds the items detected.
3369  * @param[out] error
3370  *   Pointer to error structure.
3371  *
3372  * @return
3373  *   0 on success, a negative errno value otherwise and rte_errno is set.
3374  */
3375 static int
3376 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3377                                    const struct rte_flow_action *action,
3378                                    const uint64_t item_flags,
3379                                    struct rte_flow_error *error)
3380 {
3381         int ret = 0;
3382
3383         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3384         if (!ret) {
3385                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3386                         return rte_flow_error_set(error, EINVAL,
3387                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3388                                                   NULL,
3389                                                   "no L2 item in pattern");
3390         }
3391         return ret;
3392 }
3393
3394 /**
3395  * Validate the modify-header IPv4 address actions.
3396  *
3397  * @param[in] action_flags
3398  *   Holds the actions detected until now.
3399  * @param[in] action
3400  *   Pointer to the modify action.
3401  * @param[in] item_flags
3402  *   Holds the items detected.
3403  * @param[out] error
3404  *   Pointer to error structure.
3405  *
3406  * @return
3407  *   0 on success, a negative errno value otherwise and rte_errno is set.
3408  */
3409 static int
3410 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3411                                     const struct rte_flow_action *action,
3412                                     const uint64_t item_flags,
3413                                     struct rte_flow_error *error)
3414 {
3415         int ret = 0;
3416         uint64_t layer;
3417
3418         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3419         if (!ret) {
3420                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3421                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3422                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3423                 if (!(item_flags & layer))
3424                         return rte_flow_error_set(error, EINVAL,
3425                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3426                                                   NULL,
3427                                                   "no ipv4 item in pattern");
3428         }
3429         return ret;
3430 }
3431
3432 /**
3433  * Validate the modify-header IPv6 address actions.
3434  *
3435  * @param[in] action_flags
3436  *   Holds the actions detected until now.
3437  * @param[in] action
3438  *   Pointer to the modify action.
3439  * @param[in] item_flags
3440  *   Holds the items detected.
3441  * @param[out] error
3442  *   Pointer to error structure.
3443  *
3444  * @return
3445  *   0 on success, a negative errno value otherwise and rte_errno is set.
3446  */
3447 static int
3448 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3449                                     const struct rte_flow_action *action,
3450                                     const uint64_t item_flags,
3451                                     struct rte_flow_error *error)
3452 {
3453         int ret = 0;
3454         uint64_t layer;
3455
3456         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3457         if (!ret) {
3458                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3459                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3460                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3461                 if (!(item_flags & layer))
3462                         return rte_flow_error_set(error, EINVAL,
3463                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3464                                                   NULL,
3465                                                   "no ipv6 item in pattern");
3466         }
3467         return ret;
3468 }
3469
3470 /**
3471  * Validate the modify-header TP actions.
3472  *
3473  * @param[in] action_flags
3474  *   Holds the actions detected until now.
3475  * @param[in] action
3476  *   Pointer to the modify action.
3477  * @param[in] item_flags
3478  *   Holds the items detected.
3479  * @param[out] error
3480  *   Pointer to error structure.
3481  *
3482  * @return
3483  *   0 on success, a negative errno value otherwise and rte_errno is set.
3484  */
3485 static int
3486 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3487                                   const struct rte_flow_action *action,
3488                                   const uint64_t item_flags,
3489                                   struct rte_flow_error *error)
3490 {
3491         int ret = 0;
3492         uint64_t layer;
3493
3494         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3495         if (!ret) {
3496                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3497                                  MLX5_FLOW_LAYER_INNER_L4 :
3498                                  MLX5_FLOW_LAYER_OUTER_L4;
3499                 if (!(item_flags & layer))
3500                         return rte_flow_error_set(error, EINVAL,
3501                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3502                                                   NULL, "no transport layer "
3503                                                   "in pattern");
3504         }
3505         return ret;
3506 }
3507
3508 /**
3509  * Validate the modify-header actions of increment/decrement
3510  * TCP Sequence-number.
3511  *
3512  * @param[in] action_flags
3513  *   Holds the actions detected until now.
3514  * @param[in] action
3515  *   Pointer to the modify action.
3516  * @param[in] item_flags
3517  *   Holds the items detected.
3518  * @param[out] error
3519  *   Pointer to error structure.
3520  *
3521  * @return
3522  *   0 on success, a negative errno value otherwise and rte_errno is set.
3523  */
3524 static int
3525 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3526                                        const struct rte_flow_action *action,
3527                                        const uint64_t item_flags,
3528                                        struct rte_flow_error *error)
3529 {
3530         int ret = 0;
3531         uint64_t layer;
3532
3533         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3534         if (!ret) {
3535                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3536                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3537                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3538                 if (!(item_flags & layer))
3539                         return rte_flow_error_set(error, EINVAL,
3540                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3541                                                   NULL, "no TCP item in"
3542                                                   " pattern");
3543                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3544                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3545                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3546                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3547                         return rte_flow_error_set(error, EINVAL,
3548                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3549                                                   NULL,
3550                                                   "cannot decrease and increase"
3551                                                   " TCP sequence number"
3552                                                   " at the same time");
3553         }
3554         return ret;
3555 }
3556
3557 /**
3558  * Validate the modify-header actions of increment/decrement
3559  * TCP Acknowledgment number.
3560  *
3561  * @param[in] action_flags
3562  *   Holds the actions detected until now.
3563  * @param[in] action
3564  *   Pointer to the modify action.
3565  * @param[in] item_flags
3566  *   Holds the items detected.
3567  * @param[out] error
3568  *   Pointer to error structure.
3569  *
3570  * @return
3571  *   0 on success, a negative errno value otherwise and rte_errno is set.
3572  */
3573 static int
3574 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3575                                        const struct rte_flow_action *action,
3576                                        const uint64_t item_flags,
3577                                        struct rte_flow_error *error)
3578 {
3579         int ret = 0;
3580         uint64_t layer;
3581
3582         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3583         if (!ret) {
3584                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3585                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3586                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3587                 if (!(item_flags & layer))
3588                         return rte_flow_error_set(error, EINVAL,
3589                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3590                                                   NULL, "no TCP item in"
3591                                                   " pattern");
3592                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3593                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3594                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3595                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3596                         return rte_flow_error_set(error, EINVAL,
3597                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3598                                                   NULL,
3599                                                   "cannot decrease and increase"
3600                                                   " TCP acknowledgment number"
3601                                                   " at the same time");
3602         }
3603         return ret;
3604 }
3605
3606 /**
3607  * Validate the modify-header TTL actions.
3608  *
3609  * @param[in] action_flags
3610  *   Holds the actions detected until now.
3611  * @param[in] action
3612  *   Pointer to the modify action.
3613  * @param[in] item_flags
3614  *   Holds the items detected.
3615  * @param[out] error
3616  *   Pointer to error structure.
3617  *
3618  * @return
3619  *   0 on success, a negative errno value otherwise and rte_errno is set.
3620  */
3621 static int
3622 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3623                                    const struct rte_flow_action *action,
3624                                    const uint64_t item_flags,
3625                                    struct rte_flow_error *error)
3626 {
3627         int ret = 0;
3628         uint64_t layer;
3629
3630         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3631         if (!ret) {
3632                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3633                                  MLX5_FLOW_LAYER_INNER_L3 :
3634                                  MLX5_FLOW_LAYER_OUTER_L3;
3635                 if (!(item_flags & layer))
3636                         return rte_flow_error_set(error, EINVAL,
3637                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3638                                                   NULL,
3639                                                   "no IP protocol in pattern");
3640         }
3641         return ret;
3642 }
3643
3644 /**
3645  * Validate jump action.
3646  *
3647  * @param[in] action
3648  *   Pointer to the jump action.
3649  * @param[in] action_flags
3650  *   Holds the actions detected until now.
3651  * @param[in] attributes
3652  *   Pointer to flow attributes
3653  * @param[in] external
3654  *   Action belongs to a flow rule created by a request external to the PMD.
3655  * @param[out] error
3656  *   Pointer to error structure.
3657  *
3658  * @return
3659  *   0 on success, a negative errno value otherwise and rte_errno is set.
3660  */
3661 static int
3662 flow_dv_validate_action_jump(const struct rte_flow_action *action,
3663                              uint64_t action_flags,
3664                              const struct rte_flow_attr *attributes,
3665                              bool external, struct rte_flow_error *error)
3666 {
3667         uint32_t target_group, table;
3668         int ret = 0;
3669
3670         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3671                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3672                 return rte_flow_error_set(error, EINVAL,
3673                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3674                                           "can't have 2 fate actions in"
3675                                           " same flow");
3676         if (action_flags & MLX5_FLOW_ACTION_METER)
3677                 return rte_flow_error_set(error, ENOTSUP,
3678                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3679                                           "jump with meter not supported");
3680         if (!action->conf)
3681                 return rte_flow_error_set(error, EINVAL,
3682                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3683                                           NULL, "action configuration not set");
3684         target_group =
3685                 ((const struct rte_flow_action_jump *)action->conf)->group;
3686         ret = mlx5_flow_group_to_table(attributes, external, target_group,
3687                                        true, &table, error);
3688         if (ret)
3689                 return ret;
3690         if (attributes->group == target_group)
3691                 return rte_flow_error_set(error, EINVAL,
3692                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3693                                           "target group must be other than"
3694                                           " the current flow group");
3695         return 0;
3696 }
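
/*
 * Application-side sketch (illustrative): a rule in group 0 jumping to
 * group 1; the validation above accepts it because the target group
 * differs from the group of the flow itself:
 *
 *     struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *     struct rte_flow_action_jump jump = { .group = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */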
3697
3698 /**
3699  * Validate the port_id action.
3700  *
3701  * @param[in] dev
3702  *   Pointer to rte_eth_dev structure.
3703  * @param[in] action_flags
3704  *   Bit-fields that hold the actions detected until now.
3705  * @param[in] action
3706  *   Port_id RTE action structure.
3707  * @param[in] attr
3708  *   Attributes of flow that includes this action.
3709  * @param[out] error
3710  *   Pointer to error structure.
3711  *
3712  * @return
3713  *   0 on success, a negative errno value otherwise and rte_errno is set.
3714  */
3715 static int
3716 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3717                                 uint64_t action_flags,
3718                                 const struct rte_flow_action *action,
3719                                 const struct rte_flow_attr *attr,
3720                                 struct rte_flow_error *error)
3721 {
3722         const struct rte_flow_action_port_id *port_id;
3723         struct mlx5_priv *act_priv;
3724         struct mlx5_priv *dev_priv;
3725         uint16_t port;
3726
3727         if (!attr->transfer)
3728                 return rte_flow_error_set(error, ENOTSUP,
3729                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3730                                           NULL,
3731                                           "port id action is valid in transfer"
3732                                           " mode only");
3733         if (!action || !action->conf)
3734                 return rte_flow_error_set(error, ENOTSUP,
3735                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3736                                           NULL,
3737                                           "port id action parameters must be"
3738                                           " specified");
3739         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3740                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3741                 return rte_flow_error_set(error, EINVAL,
3742                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3743                                           "can have only one fate action in"
3744                                           " a flow");
3745         dev_priv = mlx5_dev_to_eswitch_info(dev);
3746         if (!dev_priv)
3747                 return rte_flow_error_set(error, rte_errno,
3748                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3749                                           NULL,
3750                                           "failed to obtain E-Switch info");
3751         port_id = action->conf;
3752         port = port_id->original ? dev->data->port_id : port_id->id;
3753         act_priv = mlx5_port_to_eswitch_info(port, false);
3754         if (!act_priv)
3755                 return rte_flow_error_set
3756                                 (error, rte_errno,
3757                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
3758                                  "failed to obtain E-Switch port id for port");
3759         if (act_priv->domain_id != dev_priv->domain_id)
3760                 return rte_flow_error_set
3761                                 (error, EINVAL,
3762                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3763                                  "port does not belong to"
3764                                  " E-Switch being configured");
3765         return 0;
3766 }
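
/*
 * Application-side sketch (illustrative): redirecting matched packets to
 * another port of the same E-Switch; "peer_port_id" is hypothetical and
 * both ports must share the switch domain, as checked above. Note that
 * attr.transfer must be set:
 *
 *     struct rte_flow_attr attr = { .transfer = 1 };
 *     struct rte_flow_action_port_id pid = { .id = peer_port_id };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */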
3767
3768 /**
3769  * Get the maximum number of modify header actions.
3770  *
3771  * @param dev
3772  *   Pointer to rte_eth_dev structure.
3773  * @param flags
3774  *   Flags bits to check if root level.
3775  *
3776  * @return
3777  *   Max number of modify header actions the device can support.
3778  */
3779 static inline unsigned int
3780 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
3781                               uint64_t flags)
3782 {
3783         /*
3784          * There's no way to directly query the max capacity from FW.
3785          * The maximum value on the root table should be assumed to be supported.
3786          */
3787         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
3788                 return MLX5_MAX_MODIFY_NUM;
3789         else
3790                 return MLX5_ROOT_TBL_MODIFY_NUM;
3791 }
3792
3793 /**
3794  * Validate the meter action.
3795  *
3796  * @param[in] dev
3797  *   Pointer to rte_eth_dev structure.
3798  * @param[in] action_flags
3799  *   Bit-fields that holds the actions detected until now.
3800  * @param[in] action
3801  *   Pointer to the meter action.
3802  * @param[in] attr
3803  *   Attributes of flow that includes this action.
3804  * @param[out] error
3805  *   Pointer to error structure.
3806  *
3807  * @return
3808  *   0 on success, a negative errno value otherwise and rte_errno is set.
3809  */
3810 static int
3811 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
3812                                 uint64_t action_flags,
3813                                 const struct rte_flow_action *action,
3814                                 const struct rte_flow_attr *attr,
3815                                 struct rte_flow_error *error)
3816 {
3817         struct mlx5_priv *priv = dev->data->dev_private;
3818         const struct rte_flow_action_meter *am = action->conf;
3819         struct mlx5_flow_meter *fm;
3820
3821         if (!am)
3822                 return rte_flow_error_set(error, EINVAL,
3823                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3824                                           "meter action conf is NULL");
3825
3826         if (action_flags & MLX5_FLOW_ACTION_METER)
3827                 return rte_flow_error_set(error, ENOTSUP,
3828                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3829                                           "meter chaining not supported");
3830         if (action_flags & MLX5_FLOW_ACTION_JUMP)
3831                 return rte_flow_error_set(error, ENOTSUP,
3832                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3833                                           "meter with jump not supported");
3834         if (!priv->mtr_en)
3835                 return rte_flow_error_set(error, ENOTSUP,
3836                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3837                                           NULL,
3838                                           "meter action not supported");
3839         fm = mlx5_flow_meter_find(priv, am->mtr_id);
3840         if (!fm)
3841                 return rte_flow_error_set(error, EINVAL,
3842                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3843                                           "Meter not found");
3844         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
3845               (!fm->ingress && !attr->ingress && attr->egress) ||
3846               (!fm->egress && !attr->egress && attr->ingress))))
3847                 return rte_flow_error_set(error, EINVAL,
3848                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3849                                           "Flow attributes are either invalid "
3850                                           "or have a conflict with current "
3851                                           "meter attributes");
3852         return 0;
3853 }
3854
3855 /**
3856  * Validate the age action.
3857  *
3858  * @param[in] action_flags
3859  *   Holds the actions detected until now.
3860  * @param[in] action
3861  *   Pointer to the age action.
3862  * @param[in] dev
3863  *   Pointer to the Ethernet device structure.
3864  * @param[out] error
3865  *   Pointer to error structure.
3866  *
3867  * @return
3868  *   0 on success, a negative errno value otherwise and rte_errno is set.
3869  */
3870 static int
3871 flow_dv_validate_action_age(uint64_t action_flags,
3872                             const struct rte_flow_action *action,
3873                             struct rte_eth_dev *dev,
3874                             struct rte_flow_error *error)
3875 {
3876         struct mlx5_priv *priv = dev->data->dev_private;
3877         const struct rte_flow_action_age *age = action->conf;
3878
3879         if (!priv->config.devx || priv->counter_fallback)
3880                 return rte_flow_error_set(error, ENOTSUP,
3881                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3882                                           NULL,
3883                                           "age action not supported");
3884         if (!(action->conf))
3885                 return rte_flow_error_set(error, EINVAL,
3886                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3887                                           "configuration cannot be null");
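	/*
	 * Note (descriptive, derived from the bound below): aging time is
	 * presumably tracked in tenths of a second in a 16-bit field with
	 * only half of the range usable, so UINT16_MAX / 2 / 10 = 3276 and
	 * any timeout of 3276 seconds or more is rejected.
	 */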
3888         if (age->timeout >= UINT16_MAX / 2 / 10)
3889                 return rte_flow_error_set(error, ENOTSUP,
3890                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3891                                           "Max age time: 3275 seconds");
3892         if (action_flags & MLX5_FLOW_ACTION_AGE)
3893                 return rte_flow_error_set(error, EINVAL,
3894                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3895                                           "Duplicate age actions set");
3896         return 0;
3897 }
3898
3899 /**
3900  * Validate the modify-header IPv4 DSCP actions.
3901  *
3902  * @param[in] action_flags
3903  *   Holds the actions detected until now.
3904  * @param[in] action
3905  *   Pointer to the modify action.
3906  * @param[in] item_flags
3907  *   Holds the items detected.
3908  * @param[out] error
3909  *   Pointer to error structure.
3910  *
3911  * @return
3912  *   0 on success, a negative errno value otherwise and rte_errno is set.
3913  */
3914 static int
3915 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
3916                                          const struct rte_flow_action *action,
3917                                          const uint64_t item_flags,
3918                                          struct rte_flow_error *error)
3919 {
3920         int ret = 0;
3921
3922         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3923         if (!ret) {
3924                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3925                         return rte_flow_error_set(error, EINVAL,
3926                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3927                                                   NULL,
3928                                                   "no ipv4 item in pattern");
3929         }
3930         return ret;
3931 }
3932
3933 /**
3934  * Validate the modify-header IPv6 DSCP actions.
3935  *
3936  * @param[in] action_flags
3937  *   Holds the actions detected until now.
3938  * @param[in] action
3939  *   Pointer to the modify action.
3940  * @param[in] item_flags
3941  *   Holds the items detected.
3942  * @param[out] error
3943  *   Pointer to error structure.
3944  *
3945  * @return
3946  *   0 on success, a negative errno value otherwise and rte_errno is set.
3947  */
3948 static int
3949 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
3950                                          const struct rte_flow_action *action,
3951                                          const uint64_t item_flags,
3952                                          struct rte_flow_error *error)
3953 {
3954         int ret = 0;
3955
3956         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3957         if (!ret) {
3958                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
3959                         return rte_flow_error_set(error, EINVAL,
3960                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3961                                                   NULL,
3962                                                   "no ipv6 item in pattern");
3963         }
3964         return ret;
3965 }
3966
3967 /**
3968  * Find existing modify-header resource or create and register a new one.
3969  *
3970  * @param[in, out] dev
3971  *   Pointer to rte_eth_dev structure.
3972  * @param[in, out] resource
3973  *   Pointer to modify-header resource.
3974  * @param[in, out] dev_flow
3975  *   Pointer to the dev_flow.
3976  * @param[out] error
3977  *   Pointer to error structure.
3978  *
3979  * @return
3980  *   0 on success, otherwise a negative errno value and rte_errno is set.
3981  */
3982 static int
3983 flow_dv_modify_hdr_resource_register
3984                         (struct rte_eth_dev *dev,
3985                          struct mlx5_flow_dv_modify_hdr_resource *resource,
3986                          struct mlx5_flow *dev_flow,
3987                          struct rte_flow_error *error)
3988 {
3989         struct mlx5_priv *priv = dev->data->dev_private;
3990         struct mlx5_dev_ctx_shared *sh = priv->sh;
3991         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
3992         struct mlx5dv_dr_domain *ns;
3993         uint32_t actions_len;
3994         int ret;
3995
3996         resource->flags = dev_flow->dv.group ? 0 :
3997                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
3998         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
3999                                     resource->flags))
4000                 return rte_flow_error_set(error, EOVERFLOW,
4001                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4002                                           "too many modify header items");
4003         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4004                 ns = sh->fdb_domain;
4005         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4006                 ns = sh->tx_domain;
4007         else
4008                 ns = sh->rx_domain;
4009         /* Lookup a matching resource from cache. */
4010         actions_len = resource->actions_num * sizeof(resource->actions[0]);
4011         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
4012                 if (resource->ft_type == cache_resource->ft_type &&
4013                     resource->actions_num == cache_resource->actions_num &&
4014                     resource->flags == cache_resource->flags &&
4015                     !memcmp((const void *)resource->actions,
4016                             (const void *)cache_resource->actions,
4017                             actions_len)) {
4018                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
4019                                 (void *)cache_resource,
4020                                 rte_atomic32_read(&cache_resource->refcnt));
4021                         rte_atomic32_inc(&cache_resource->refcnt);
4022                         dev_flow->handle->dvh.modify_hdr = cache_resource;
4023                         return 0;
4024                 }
4025         }
4026         /* Register new modify-header resource. */
4027         cache_resource = rte_calloc(__func__, 1,
4028                                     sizeof(*cache_resource) + actions_len, 0);
4029         if (!cache_resource)
4030                 return rte_flow_error_set(error, ENOMEM,
4031                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4032                                           "cannot allocate resource memory");
4033         *cache_resource = *resource;
4034         rte_memcpy(cache_resource->actions, resource->actions, actions_len);
4035         ret = mlx5_flow_os_create_flow_action_modify_header
4036                                         (sh->ctx, ns, cache_resource,
4037                                          actions_len, &cache_resource->action);
4038         if (ret) {
4039                 rte_free(cache_resource);
4040                 return rte_flow_error_set(error, ENOMEM,
4041                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4042                                           NULL, "cannot create action");
4043         }
4044         rte_atomic32_init(&cache_resource->refcnt);
4045         rte_atomic32_inc(&cache_resource->refcnt);
4046         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
4047         dev_flow->handle->dvh.modify_hdr = cache_resource;
4048         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
4049                 (void *)cache_resource,
4050                 rte_atomic32_read(&cache_resource->refcnt));
4051         return 0;
4052 }
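
/*
 * Usage sketch (illustrative only, not compiled): a caller fills a
 * resource with the translated modify-header commands and lets the
 * registration routine above deduplicate it against the cache. The
 * field values are placeholders, not real PRM command encodings, and
 * real callers must reserve room for the flexible actions[] array.
 */
#if 0
	struct mlx5_flow_dv_modify_hdr_resource res = {
		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
		.actions_num = 1,
	};
	/* res.actions[0] = <translated SET/ADD/COPY command>; */
	if (flow_dv_modify_hdr_resource_register(dev, &res, dev_flow,
						 error))
		return -rte_errno;
	/*
	 * dev_flow->handle->dvh.modify_hdr now points to the shared,
	 * reference-counted cache entry.
	 */
#endif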
4053
4054 /**
4055  * Get DV flow counter by index.
4056  *
4057  * @param[in] dev
4058  *   Pointer to the Ethernet device structure.
4059  * @param[in] idx
4060  *   mlx5 flow counter index in the container.
4061  * @param[out] ppool
4062  *   mlx5 flow counter pool in the container.
4063  *
4064  * @return
4065  *   Pointer to the counter, NULL otherwise.
4066  */
4067 static struct mlx5_flow_counter *
4068 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4069                            uint32_t idx,
4070                            struct mlx5_flow_counter_pool **ppool)
4071 {
4072         struct mlx5_priv *priv = dev->data->dev_private;
4073         struct mlx5_pools_container *cont;
4074         struct mlx5_flow_counter_pool *pool;
4075         uint32_t batch = 0, age = 0;
4076
4077         idx--;
4078         age = MLX_CNT_IS_AGE(idx);
4079         idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx;
4080         if (idx >= MLX5_CNT_BATCH_OFFSET) {
4081                 idx -= MLX5_CNT_BATCH_OFFSET;
4082                 batch = 1;
4083         }
4084         cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4085         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
4086         pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
4087         MLX5_ASSERT(pool);
4088         if (ppool)
4089                 *ppool = pool;
4090         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4091 }
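
/*
 * Counter index layout, as decoded above: indices are 1-based so that
 * 0 can mean "no counter". Aging counters carry MLX5_CNT_AGE_OFFSET
 * on top, batch-allocated counters carry MLX5_CNT_BATCH_OFFSET, and
 * once both are stripped, idx / MLX5_COUNTERS_PER_POOL selects the
 * pool while idx % MLX5_COUNTERS_PER_POOL selects the counter in it.
 */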
4092
4093 /**
4094  * Check whether the devx counter belongs to the pool.
4095  *
4096  * @param[in] pool
4097  *   Pointer to the counter pool.
4098  * @param[in] id
4099  *   The counter devx ID.
4100  *
4101  * @return
4102  *   True if counter belongs to the pool, false otherwise.
4103  */
4104 static bool
4105 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4106 {
4107         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4108                    MLX5_COUNTERS_PER_POOL;
4109
4110         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4111                 return true;
4112         return false;
4113 }
4114
4115 /**
4116  * Get a pool by devx counter ID.
4117  *
4118  * @param[in] cont
4119  *   Pointer to the counter container.
4120  * @param[in] id
4121  *   The counter devx ID.
4122  *
4123  * @return
4124  *   The counter pool pointer if it exists, NULL otherwise.
4125  */
4126 static struct mlx5_flow_counter_pool *
4127 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
4128 {
4129         uint32_t i;
4130
4131         /* Check last used pool. */
4132         if (cont->last_pool_idx != POOL_IDX_INVALID &&
4133             flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
4134                 return cont->pools[cont->last_pool_idx];
4135         /* ID out of range means no suitable pool in the container. */
4136         if (id > cont->max_id || id < cont->min_id)
4137                 return NULL;
4138         /*
4139          * Find the pool from the end of the container, since counter IDs
4140          * are mostly sequentially increasing, and the last pool is usually
4141          * the needed one.
4142          */
4143         i = rte_atomic16_read(&cont->n_valid);
4144         while (i--) {
4145                 struct mlx5_flow_counter_pool *pool = cont->pools[i];
4146
4147                 if (flow_dv_is_counter_in_pool(pool, id))
4148                         return pool;
4149         }
4150         return NULL;
4151 }
4152
4153 /**
4154  * Allocate new memory for the counter values, wrapped by all the needed
4155  * management.
4156  *
4157  * @param[in] dev
4158  *   Pointer to the Ethernet device structure.
4159  * @param[in] raws_n
4160  *   The number of raw memory areas - each for MLX5_COUNTERS_PER_POOL counters.
4161  *
4162  * @return
4163  *   The new memory management pointer on success, otherwise NULL and rte_errno
4164  *   is set.
4165  */
4166 static struct mlx5_counter_stats_mem_mng *
4167 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
4168 {
4169         struct mlx5_priv *priv = dev->data->dev_private;
4170         struct mlx5_dev_ctx_shared *sh = priv->sh;
4171         struct mlx5_devx_mkey_attr mkey_attr;
4172         struct mlx5_counter_stats_mem_mng *mem_mng;
4173         volatile struct flow_counter_stats *raw_data;
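	/*
	 * One allocation holds, in order: the raw counter statistics
	 * (raws_n * MLX5_COUNTERS_PER_POOL entries), then raws_n
	 * struct mlx5_counter_stats_raw descriptors, and finally the
	 * mlx5_counter_stats_mem_mng header itself. Only the leading
	 * raw-statistics area is registered as UMEM for the device to
	 * write query results into.
	 */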
4174         int size = (sizeof(struct flow_counter_stats) *
4175                         MLX5_COUNTERS_PER_POOL +
4176                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
4177                         sizeof(struct mlx5_counter_stats_mem_mng);
4178         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
4179         int i;
4180
4181         if (!mem) {
4182                 rte_errno = ENOMEM;
4183                 return NULL;
4184         }
4185         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
4186         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
4187         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
4188                                                  IBV_ACCESS_LOCAL_WRITE);
4189         if (!mem_mng->umem) {
4190                 rte_errno = errno;
4191                 rte_free(mem);
4192                 return NULL;
4193         }
4194         mkey_attr.addr = (uintptr_t)mem;
4195         mkey_attr.size = size;
4196         mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
4197         mkey_attr.pd = sh->pdn;
4198         mkey_attr.log_entity_size = 0;
4199         mkey_attr.pg_access = 0;
4200         mkey_attr.klm_array = NULL;
4201         mkey_attr.klm_num = 0;
4202         if (priv->config.hca_attr.relaxed_ordering_write &&
4203                 priv->config.hca_attr.relaxed_ordering_read  &&
4204                 !haswell_broadwell_cpu)
4205                 mkey_attr.relaxed_ordering = 1;
4206         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
4207         if (!mem_mng->dm) {
4208                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
4209                 rte_errno = errno;
4210                 rte_free(mem);
4211                 return NULL;
4212         }
4213         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
4214         raw_data = (volatile struct flow_counter_stats *)mem;
4215         for (i = 0; i < raws_n; ++i) {
4216                 mem_mng->raws[i].mem_mng = mem_mng;
4217                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
4218         }
4219         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
4220         return mem_mng;
4221 }
4222
4223 /**
4224  * Resize a counter container.
4225  *
4226  * @param[in] dev
4227  *   Pointer to the Ethernet device structure.
4228  * @param[in] batch
4229  *   Whether the pool is for counters that were allocated by batch command.
4230  * @param[in] age
4231  *   Whether the pool is for aging counters.
4232  *
4233  * @return
4234  *   0 on success, otherwise negative errno value and rte_errno is set.
4235  */
4236 static int
4237 flow_dv_container_resize(struct rte_eth_dev *dev,
4238                                 uint32_t batch, uint32_t age)
4239 {
4240         struct mlx5_priv *priv = dev->data->dev_private;
4241         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4242                                                                age);
4243         struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
4244         void *old_pools = cont->pools;
4245         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
4246         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4247         void *pools = rte_calloc(__func__, 1, mem_size, 0);
4248
4249         if (!pools) {
4250                 rte_errno = ENOMEM;
4251                 return -ENOMEM;
4252         }
4253         if (old_pools)
4254                 memcpy(pools, old_pools, cont->n *
4255                                        sizeof(struct mlx5_flow_counter_pool *));
4256         /*
4257          * Fallback mode queries the counters directly; no background query
4258          * resources are needed.
4259          */
4260         if (!priv->counter_fallback) {
4261                 int i;
4262
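		/*
		 * Allocate raw areas for the resized container plus spare
		 * areas for the asynchronous batch query; the spares are
		 * parked on the free_stat_raws list for the query alarm
		 * to pick up.
		 */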
4263                 mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
4264                           MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
4265                 if (!mem_mng) {
4266                         rte_free(pools);
4267                         return -ENOMEM;
4268                 }
4269                 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
4270                         LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
4271                                          mem_mng->raws +
4272                                          MLX5_CNT_CONTAINER_RESIZE +
4273                                          i, next);
4274         }
4275         rte_spinlock_lock(&cont->resize_sl);
4276         cont->n = resize;
4277         cont->mem_mng = mem_mng;
4278         cont->pools = pools;
4279         rte_spinlock_unlock(&cont->resize_sl);
4280         if (old_pools)
4281                 rte_free(old_pools);
4282         return 0;
4283 }
4284
4285 /**
4286  * Query a devx flow counter.
4287  *
4288  * @param[in] dev
4289  *   Pointer to the Ethernet device structure.
4290  * @param[in] counter
4291  *   Index to the flow counter.
4292  * @param[out] pkts
4293  *   The statistics value of packets.
4294  * @param[out] bytes
4295  *   The statistics value of bytes.
4296  *
4297  * @return
4298  *   0 on success, otherwise a negative errno value and rte_errno is set.
4299  */
4300 static inline int
4301 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4302                      uint64_t *bytes)
4303 {
4304         struct mlx5_priv *priv = dev->data->dev_private;
4305         struct mlx5_flow_counter_pool *pool = NULL;
4306         struct mlx5_flow_counter *cnt;
4307         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4308         int offset;
4309
4310         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4311         MLX5_ASSERT(pool);
4312         if (counter < MLX5_CNT_BATCH_OFFSET) {
4313                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4314                 if (priv->counter_fallback)
4315                         return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
4316                                         0, pkts, bytes, 0, NULL, NULL, 0);
4317         }
4318
4319         rte_spinlock_lock(&pool->sl);
4320         /*
4321          * A single counter allocation may produce a smaller ID than the
4322          * currently allocated ones, in parallel with the host reading.
4323          * In this case the new counter values must be reported as 0.
4324          */
4325         if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
4326                 *pkts = 0;
4327                 *bytes = 0;
4328         } else {
4329                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4330                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4331                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4332         }
4333         rte_spinlock_unlock(&pool->sl);
4334         return 0;
4335 }
4336
4337 /**
4338  * Create and initialize a new counter pool.
4339  *
4340  * @param[in] dev
4341  *   Pointer to the Ethernet device structure.
4342  * @param[out] dcs
4343  *   The devX counter handle.
4344  * @param[in] batch
4345  *   Whether the pool is for counters that were allocated by batch command.
4346  * @param[in] age
4347  *   Whether the pool is for counters that were allocated for aging.
4350  *
4351  * @return
4352  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4353  */
4354 static struct mlx5_flow_counter_pool *
4355 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4356                     uint32_t batch, uint32_t age)
4357 {
4358         struct mlx5_priv *priv = dev->data->dev_private;
4359         struct mlx5_flow_counter_pool *pool;
4360         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4361                                                                age);
4362         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
4363         uint32_t size = sizeof(*pool);
4364
4365         if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
4366                 return NULL;
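	/*
	 * Pool memory layout (descriptive): the pool header is followed
	 * by MLX5_COUNTERS_PER_POOL counters, then by the per-counter
	 * extensions (single-allocation pools only) and the per-counter
	 * age parameters (aging pools only). The MLX5_POOL_GET_CNT(),
	 * MLX5_CNT_TO_CNT_EXT() and MLX5_CNT_TO_AGE() accessors rely on
	 * this ordering.
	 */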
4367         size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
4368         size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
4369         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
4370         pool = rte_calloc(__func__, 1, size, 0);
4371         if (!pool) {
4372                 rte_errno = ENOMEM;
4373                 return NULL;
4374         }
4375         pool->min_dcs = dcs;
4376         if (!priv->counter_fallback)
4377                 pool->raw = cont->mem_mng->raws + n_valid %
4378                                                       MLX5_CNT_CONTAINER_RESIZE;
4379         pool->raw_hw = NULL;
4380         pool->type = 0;
4381         pool->type |= (batch ? 0 :  CNT_POOL_TYPE_EXT);
4382         pool->type |= (!age ? 0 :  CNT_POOL_TYPE_AGE);
4383         pool->query_gen = 0;
4384         rte_spinlock_init(&pool->sl);
4385         TAILQ_INIT(&pool->counters[0]);
4386         TAILQ_INIT(&pool->counters[1]);
4387         TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
4388         pool->index = n_valid;
4389         cont->pools[n_valid] = pool;
4390         if (!batch) {
4391                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4392
4393                 if (base < cont->min_id)
4394                         cont->min_id = base;
4395                 if (base > cont->max_id)
4396                         cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4397                 cont->last_pool_idx = pool->index;
4398         }
4399         /* Pool initialization must be visible before host thread access. */
4400         rte_cio_wmb();
4401         rte_atomic16_add(&cont->n_valid, 1);
4402         return pool;
4403 }
4404
4405 /**
4406  * Update the minimum dcs-id for aged or non-aged counter pools.
4407  *
4408  * @param[in] dev
4409  *   Pointer to the Ethernet device structure.
4410  * @param[in] pool
4411  *   Current counter pool.
4412  * @param[in] batch
4413  *   Whether the pool is for counters that were allocated by batch command.
4414  * @param[in] age
4415  *   Whether the counter is for aging.
4416  */
4417 static void
4418 flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev,
4419                         struct mlx5_flow_counter_pool *pool,
4420                         uint32_t batch, uint32_t age)
4421 {
4422         struct mlx5_priv *priv = dev->data->dev_private;
4423         struct mlx5_flow_counter_pool *other;
4424         struct mlx5_pools_container *cont;
4425
4426         cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1));
4427         other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id);
4428         if (!other)
4429                 return;
4430         if (pool->min_dcs->id < other->min_dcs->id) {
4431                 rte_atomic64_set(&other->a64_dcs,
4432                         rte_atomic64_read(&pool->a64_dcs));
4433         } else {
4434                 rte_atomic64_set(&pool->a64_dcs,
4435                         rte_atomic64_read(&other->a64_dcs));
4436         }
4437 }
4438 /**
4439  * Prepare a new counter and/or a new counter pool.
4440  *
4441  * @param[in] dev
4442  *   Pointer to the Ethernet device structure.
4443  * @param[out] cnt_free
4444  *   Where to put the pointer of a new counter.
4445  * @param[in] batch
4446  *   Whether the pool is for counters that were allocated by batch command.
4447  * @param[in] age
4448  *   Whether the pool is for counters that were allocated for aging.
4449  *
4450  * @return
4451  *   The counter pool pointer and @p cnt_free is set on success,
4452  *   NULL otherwise and rte_errno is set.
4453  */
4454 static struct mlx5_flow_counter_pool *
4455 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4456                              struct mlx5_flow_counter **cnt_free,
4457                              uint32_t batch, uint32_t age)
4458 {
4459         struct mlx5_priv *priv = dev->data->dev_private;
4460         struct mlx5_pools_container *cont;
4461         struct mlx5_flow_counter_pool *pool;
4462         struct mlx5_counters tmp_tq;
4463         struct mlx5_devx_obj *dcs = NULL;
4464         struct mlx5_flow_counter *cnt;
4465         uint32_t i;
4466
4467         cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4468         if (!batch) {
4469                 /* bulk_bitmap must be 0 for single counter allocation. */
4470                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4471                 if (!dcs)
4472                         return NULL;
4473                 pool = flow_dv_find_pool_by_id(cont, dcs->id);
4474                 if (!pool) {
4475                         pool = flow_dv_pool_create(dev, dcs, batch, age);
4476                         if (!pool) {
4477                                 mlx5_devx_cmd_destroy(dcs);
4478                                 return NULL;
4479                         }
4480                 } else if (dcs->id < pool->min_dcs->id) {
4481                         rte_atomic64_set(&pool->a64_dcs,
4482                                          (int64_t)(uintptr_t)dcs);
4483                 }
4484                 flow_dv_counter_update_min_dcs(dev,
4485                                                 pool, batch, age);
4486                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4487                 cnt = MLX5_POOL_GET_CNT(pool, i);
4488                 cnt->pool = pool;
4489                 MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
4490                 *cnt_free = cnt;
4491                 return pool;
4492         }
4493         /* bulk_bitmap is in 128 counters units. */
4494         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
4495                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4496         if (!dcs) {
4497                 rte_errno = ENODATA;
4498                 return NULL;
4499         }
4500         pool = flow_dv_pool_create(dev, dcs, batch, age);
4501         if (!pool) {
4502                 mlx5_devx_cmd_destroy(dcs);
4503                 return NULL;
4504         }
4505         TAILQ_INIT(&tmp_tq);
4506         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4507                 cnt = MLX5_POOL_GET_CNT(pool, i);
4508                 cnt->pool = pool;
4509                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4510         }
4511         rte_spinlock_lock(&cont->csl);
4512         TAILQ_CONCAT(&cont->counters, &tmp_tq, next);
4513         rte_spinlock_unlock(&cont->csl);
4514         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4515         (*cnt_free)->pool = pool;
4516         return pool;
4517 }
4518
4519 /**
4520  * Search for an existing shared counter.
4521  *
4522  * @param[in] dev
4523  *   Pointer to the Ethernet device structure.
4524  * @param[in] id
4525  *   The shared counter ID to search.
4526  * @param[out] ppool
4527  *   mlx5 flow counter pool in the container.
4528  *
4529  * @return
4530  *   NULL if it does not exist, otherwise pointer to the shared extended counter.
4531  */
4532 static struct mlx5_flow_counter_ext *
4533 flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
4534                               struct mlx5_flow_counter_pool **ppool)
4535 {
4536         struct mlx5_priv *priv = dev->data->dev_private;
4537         union mlx5_l3t_data data;
4538         uint32_t cnt_idx;
4539
4540         if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
4541                 return NULL;
4542         cnt_idx = data.dword;
4543         /*
4544          * Shared counters don't have age info. The counter extension is
4545          * right after the counter data structure.
4546          */
4547         return (struct mlx5_flow_counter_ext *)
4548                ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
4549 }
4550
4551 /**
4552  * Allocate a flow counter.
4553  *
4554  * @param[in] dev
4555  *   Pointer to the Ethernet device structure.
4556  * @param[in] shared
4557  *   Indicate if this counter is shared with other flows.
4558  * @param[in] id
4559  *   Counter identifier.
4560  * @param[in] group
4561  *   Counter flow group.
4562  * @param[in] age
4563  *   Whether the counter was allocated for aging.
4564  *
4565  * @return
4566  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4567  */
4568 static uint32_t
4569 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
4570                       uint16_t group, uint32_t age)
4571 {
4572         struct mlx5_priv *priv = dev->data->dev_private;
4573         struct mlx5_flow_counter_pool *pool = NULL;
4574         struct mlx5_flow_counter *cnt_free = NULL;
4575         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4576         /*
4577          * Currently a group 0 flow counter cannot be assigned to a flow if
4578          * it is not the first one in the batch counter allocation, so it is
4579          * better to allocate counters one by one for these flows in a
4580          * separate container.
4581          * A counter can be shared between different groups, so we need to
4582          * take shared counters from the single-counter container.
4583          */
4584         uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
4585         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4586                                                                age);
4587         uint32_t cnt_idx;
4588
4589         if (!priv->config.devx) {
4590                 rte_errno = ENOTSUP;
4591                 return 0;
4592         }
4593         if (shared) {
4594                 cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
4595                 if (cnt_ext) {
4596                         if (cnt_ext->ref_cnt + 1 == 0) {
4597                                 rte_errno = E2BIG;
4598                                 return 0;
4599                         }
4600                         cnt_ext->ref_cnt++;
4601                         cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
4602                                   (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
4603                                   + 1;
4604                         return cnt_idx;
4605                 }
4606         }
4607         /* Get free counters from container. */
4608         rte_spinlock_lock(&cont->csl);
4609         cnt_free = TAILQ_FIRST(&cont->counters);
4610         if (cnt_free)
4611                 TAILQ_REMOVE(&cont->counters, cnt_free, next);
4612         rte_spinlock_unlock(&cont->csl);
4613         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free,
4614                                                        batch, age))
4615                 goto err;
4616         pool = cnt_free->pool;
4617         if (!batch)
4618                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
4619         /* Create a DV counter action only on first-time usage. */
4620         if (!cnt_free->action) {
4621                 uint16_t offset;
4622                 struct mlx5_devx_obj *dcs;
4623                 int ret;
4624
4625                 if (batch) {
4626                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4627                         dcs = pool->min_dcs;
4628                 } else {
4629                         offset = 0;
4630                         dcs = cnt_ext->dcs;
4631                 }
4632                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4633                                                             &cnt_free->action);
4634                 if (ret) {
4635                         rte_errno = errno;
4636                         goto err;
4637                 }
4638         }
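	/*
	 * Compose the global counter index: the 1-based position inside
	 * the container (pool index * pool size + in-pool offset), plus
	 * the batch/age container offsets - exactly what
	 * flow_dv_counter_get_by_idx() strips off again.
	 */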
4639         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4640                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4641         cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
4642         cnt_idx += age * MLX5_CNT_AGE_OFFSET;
4643         /* Update the counter reset values. */
4644         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4645                                  &cnt_free->bytes))
4646                 goto err;
4647         if (cnt_ext) {
4648                 cnt_ext->shared = shared;
4649                 cnt_ext->ref_cnt = 1;
4650                 cnt_ext->id = id;
4651                 if (shared) {
4652                         union mlx5_l3t_data data;
4653
4654                         data.dword = cnt_idx;
4655                         if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
4656                                 return 0;
4657                 }
4658         }
4659         if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
4660                 /* Start the asynchronous batch query by the host thread. */
4661                 mlx5_set_query_alarm(priv->sh);
4662         return cnt_idx;
4663 err:
4664         if (cnt_free) {
4665                 cnt_free->pool = pool;
4666                 rte_spinlock_lock(&cont->csl);
4667                 TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next);
4668                 rte_spinlock_unlock(&cont->csl);
4669         }
4670         return 0;
4671 }
4672
4673 /**
4674  * Get age param from counter index.
4675  *
4676  * @param[in] dev
4677  *   Pointer to the Ethernet device structure.
4678  * @param[in] counter
4679  *   Index to the counter handler.
4680  *
4681  * @return
4682  *   The aging parameter specified for the counter index.
4683  */
4684 static struct mlx5_age_param *
4685 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
4686                                 uint32_t counter)
4687 {
4688         struct mlx5_flow_counter *cnt;
4689         struct mlx5_flow_counter_pool *pool = NULL;
4690
4691         flow_dv_counter_get_by_idx(dev, counter, &pool);
4692         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
4693         cnt = MLX5_POOL_GET_CNT(pool, counter);
4694         return MLX5_CNT_TO_AGE(cnt);
4695 }
4696
4697 /**
4698  * Remove a flow counter from aged counter list.
4699  *
4700  * @param[in] dev
4701  *   Pointer to the Ethernet device structure.
4702  * @param[in] counter
4703  *   Index to the counter handler.
4704  * @param[in] cnt
4705  *   Pointer to the counter handler.
4706  */
4707 static void
4708 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
4709                                 uint32_t counter, struct mlx5_flow_counter *cnt)
4710 {
4711         struct mlx5_age_info *age_info;
4712         struct mlx5_age_param *age_param;
4713         struct mlx5_priv *priv = dev->data->dev_private;
4714
4715         age_info = GET_PORT_AGE_INFO(priv);
4716         age_param = flow_dv_counter_idx_get_age(dev, counter);
4717         if (rte_atomic16_cmpset((volatile uint16_t *)
4718                         &age_param->state,
4719                         AGE_CANDIDATE, AGE_FREE)
4720                         != AGE_CANDIDATE) {
4721                 /*
4722                  * We need the lock even if it is an age timeout,
4723                  * since the counter may still be in process.
4724                  */
4725                 rte_spinlock_lock(&age_info->aged_sl);
4726                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
4727                 rte_spinlock_unlock(&age_info->aged_sl);
4728         }
4729         rte_atomic16_set(&age_param->state, AGE_FREE);
4730 }
4731 /**
4732  * Release a flow counter.
4733  *
4734  * @param[in] dev
4735  *   Pointer to the Ethernet device structure.
4736  * @param[in] counter
4737  *   Index to the counter handler.
4738  */
4739 static void
4740 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
4741 {
4742         struct mlx5_priv *priv = dev->data->dev_private;
4743         struct mlx5_flow_counter_pool *pool = NULL;
4744         struct mlx5_flow_counter *cnt;
4745         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4746
4747         if (!counter)
4748                 return;
4749         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4750         MLX5_ASSERT(pool);
4751         if (counter < MLX5_CNT_BATCH_OFFSET) {
4752                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4753                 if (cnt_ext) {
4754                         if (--cnt_ext->ref_cnt)
4755                                 return;
4756                         if (cnt_ext->shared)
4757                                 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
4758                                                      cnt_ext->id);
4759                 }
4760         }
4761         if (IS_AGE_POOL(pool))
4762                 flow_dv_counter_remove_from_age(dev, counter, cnt);
4763         cnt->pool = pool;
4764         /*
4765          * Put the counter back to a list to be updated in non-fallback
4766          * mode. Currently, two lists are used alternately: while one is
4767          * being queried, freed counters are added to the other list,
4768          * selected by the pool query_gen value. After the query finishes,
4769          * that list is concatenated to the global container counter list.
4770          * The lists are swapped when a query starts, so no lock is needed
4771          * as the query callback and the release function operate on
4772          * different lists.
4773          */
4774         if (!priv->counter_fallback)
4775                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
4776         else
4777                 TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER
4778                                   (priv->sh, 0, 0))->counters),
4779                                   cnt, next);
4780 }
4781
4782 /**
4783  * Verify the @p attributes will be correctly understood by the NIC and store
4784  * them in the @p flow if everything is correct.
4785  *
4786  * @param[in] dev
4787  *   Pointer to dev struct.
4788  * @param[in] attributes
4789  *   Pointer to flow attributes
4790  * @param[in] external
4791  *   This flow rule is created by a request external to the PMD.
4792  * @param[out] error
4793  *   Pointer to error structure.
4794  *
4795  * @return
4796  *   - 0 on success and non-root table.
4797  *   - 1 on success and root table.
4798  *   - a negative errno value otherwise and rte_errno is set.
4799  */
4800 static int
4801 flow_dv_validate_attributes(struct rte_eth_dev *dev,
4802                             const struct rte_flow_attr *attributes,
4803                             bool external __rte_unused,
4804                             struct rte_flow_error *error)
4805 {
4806         struct mlx5_priv *priv = dev->data->dev_private;
4807         uint32_t priority_max = priv->config.flow_prio - 1;
4808         int ret = 0;
4809
4810 #ifndef HAVE_MLX5DV_DR
4811         if (attributes->group)
4812                 return rte_flow_error_set(error, ENOTSUP,
4813                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4814                                           NULL,
4815                                           "groups are not supported");
4816 #else
4817         uint32_t table = 0;
4818
4819         ret = mlx5_flow_group_to_table(attributes, external,
4820                                        attributes->group, !!priv->fdb_def_rule,
4821                                        &table, error);
4822         if (ret)
4823                 return ret;
4824         if (!table)
4825                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4826 #endif
4827         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
4828             attributes->priority >= priority_max)
4829                 return rte_flow_error_set(error, ENOTSUP,
4830                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
4831                                           NULL,
4832                                           "priority out of range");
4833         if (attributes->transfer) {
4834                 if (!priv->config.dv_esw_en)
4835                         return rte_flow_error_set
4836                                 (error, ENOTSUP,
4837                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4838                                  "E-Switch dr is not supported");
4839                 if (!(priv->representor || priv->master))
4840                         return rte_flow_error_set
4841                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4842                                  NULL, "E-Switch configuration can only be"
4843                                  " done by a master or a representor device");
4844                 if (attributes->egress)
4845                         return rte_flow_error_set
4846                                 (error, ENOTSUP,
4847                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
4848                                  "egress is not supported");
4849         }
4850         if (!(attributes->egress ^ attributes->ingress))
4851                 return rte_flow_error_set(error, ENOTSUP,
4852                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4853                                           "must specify exactly one of "
4854                                           "ingress or egress");
4855         return ret;
4856 }
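
/*
 * Example (illustrative only, not compiled): the return value tells the
 * caller whether the rule lands on the root table, where the stricter
 * root-table limitations apply.
 */
#if 0
	const struct rte_flow_attr example_attr = {
		.group = 1,
		.ingress = 1,
	};
	int is_root = flow_dv_validate_attributes(dev, &example_attr,
						  true, error);
	/*
	 * With MLX5DV_DR, group 1 maps to a non-root table, so is_root is
	 * 0; a group 0 rule would yield MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL.
	 */
#endif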
4857
4858 /**
4859  * Internal validation function. For validating both actions and items.
4860  *
4861  * @param[in] dev
4862  *   Pointer to the rte_eth_dev structure.
4863  * @param[in] attr
4864  *   Pointer to the flow attributes.
4865  * @param[in] items
4866  *   Pointer to the list of items.
4867  * @param[in] actions
4868  *   Pointer to the list of actions.
4869  * @param[in] external
4870  *   This flow rule is created by a request external to the PMD.
4871  * @param[in] hairpin
4872  *   Number of hairpin TX actions, 0 means classic flow.
4873  * @param[out] error
4874  *   Pointer to the error structure.
4875  *
4876  * @return
4877  *   0 on success, a negative errno value otherwise and rte_errno is set.
4878  */
4879 static int
4880 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4881                  const struct rte_flow_item items[],
4882                  const struct rte_flow_action actions[],
4883                  bool external, int hairpin, struct rte_flow_error *error)
4884 {
4885         int ret;
4886         uint64_t action_flags = 0;
4887         uint64_t item_flags = 0;
4888         uint64_t last_item = 0;
4889         uint8_t next_protocol = 0xff;
4890         uint16_t ether_type = 0;
4891         int actions_n = 0;
4892         uint8_t item_ipv6_proto = 0;
4893         const struct rte_flow_item *gre_item = NULL;
4894         const struct rte_flow_action_raw_decap *decap;
4895         const struct rte_flow_action_raw_encap *encap;
4896         const struct rte_flow_action_rss *rss;
4897         const struct rte_flow_item_tcp nic_tcp_mask = {
4898                 .hdr = {
4899                         .tcp_flags = 0xFF,
4900                         .src_port = RTE_BE16(UINT16_MAX),
4901                         .dst_port = RTE_BE16(UINT16_MAX),
4902                 }
4903         };
4904         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
4905                 .hdr = {
4906                         .src_addr = RTE_BE32(0xffffffff),
4907                         .dst_addr = RTE_BE32(0xffffffff),
4908                         .type_of_service = 0xff,
4909                         .next_proto_id = 0xff,
4910                         .time_to_live = 0xff,
4911                 },
4912         };
4913         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
4914                 .hdr = {
4915                         .src_addr =
4916                         "\xff\xff\xff\xff\xff\xff\xff\xff"
4917                         "\xff\xff\xff\xff\xff\xff\xff\xff",
4918                         .dst_addr =
4919                         "\xff\xff\xff\xff\xff\xff\xff\xff"
4920                         "\xff\xff\xff\xff\xff\xff\xff\xff",
4921                         .vtc_flow = RTE_BE32(0xffffffff),
4922                         .proto = 0xff,
4923                         .hop_limits = 0xff,
4924                 },
4925         };
4926         const struct rte_flow_item_ecpri nic_ecpri_mask = {
4927                 .hdr = {
4928                         .common = {
4929                                 .u32 =
4930                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
4931                                         .type = 0xFF,
4932                                         }).u32),
4933                         },
4934                         .dummy[0] = 0xffffffff,
4935                 },
4936         };
4937         struct mlx5_priv *priv = dev->data->dev_private;
4938         struct mlx5_dev_config *dev_conf = &priv->config;
4939         uint16_t queue_index = 0xFFFF;
4940         const struct rte_flow_item_vlan *vlan_m = NULL;
4941         int16_t rw_act_num = 0;
4942         uint64_t is_root;
4943
4944         if (items == NULL)
4945                 return -1;
4946         ret = flow_dv_validate_attributes(dev, attr, external, error);
4947         if (ret < 0)
4948                 return ret;
4949         is_root = (uint64_t)ret;
4950         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4951                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4952                 int type = items->type;
4953
4954                 if (!mlx5_flow_os_item_supported(type))
4955                         return rte_flow_error_set(error, ENOTSUP,
4956                                                   RTE_FLOW_ERROR_TYPE_ITEM,
4957                                                   NULL, "item not supported");
4958                 switch (type) {
4959                 case RTE_FLOW_ITEM_TYPE_VOID:
4960                         break;
4961                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4962                         ret = flow_dv_validate_item_port_id
4963                                         (dev, items, attr, item_flags, error);
4964                         if (ret < 0)
4965                                 return ret;
4966                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4967                         break;
4968                 case RTE_FLOW_ITEM_TYPE_ETH:
4969                         ret = mlx5_flow_validate_item_eth(items, item_flags,
4970                                                           error);
4971                         if (ret < 0)
4972                                 return ret;
4973                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4974                                              MLX5_FLOW_LAYER_OUTER_L2;
4975                         if (items->mask != NULL && items->spec != NULL) {
4976                                 ether_type =
4977                                         ((const struct rte_flow_item_eth *)
4978                                          items->spec)->type;
4979                                 ether_type &=
4980                                         ((const struct rte_flow_item_eth *)
4981                                          items->mask)->type;
4982                                 ether_type = rte_be_to_cpu_16(ether_type);
4983                         } else {
4984                                 ether_type = 0;
4985                         }
4986                         break;
4987                 case RTE_FLOW_ITEM_TYPE_VLAN:
4988                         ret = flow_dv_validate_item_vlan(items, item_flags,
4989                                                          dev, error);
4990                         if (ret < 0)
4991                                 return ret;
4992                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
4993                                              MLX5_FLOW_LAYER_OUTER_VLAN;
4994                         if (items->mask != NULL && items->spec != NULL) {
4995                                 ether_type =
4996                                         ((const struct rte_flow_item_vlan *)
4997                                          items->spec)->inner_type;
4998                                 ether_type &=
4999                                         ((const struct rte_flow_item_vlan *)
5000                                          items->mask)->inner_type;
5001                                 ether_type = rte_be_to_cpu_16(ether_type);
5002                         } else {
5003                                 ether_type = 0;
5004                         }
5005                         /* Store outer VLAN mask for of_push_vlan action. */
5006                         if (!tunnel)
5007                                 vlan_m = items->mask;
5008                         break;
5009                 case RTE_FLOW_ITEM_TYPE_IPV4:
5010                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5011                                                   &item_flags, &tunnel);
5012                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
5013                                                            last_item,
5014                                                            ether_type,
5015                                                            &nic_ipv4_mask,
5016                                                            error);
5017                         if (ret < 0)
5018                                 return ret;
5019                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5020                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5021                         if (items->mask != NULL &&
5022                             ((const struct rte_flow_item_ipv4 *)
5023                              items->mask)->hdr.next_proto_id) {
5024                                 next_protocol =
5025                                         ((const struct rte_flow_item_ipv4 *)
5026                                          (items->spec))->hdr.next_proto_id;
5027                                 next_protocol &=
5028                                         ((const struct rte_flow_item_ipv4 *)
5029                                          (items->mask))->hdr.next_proto_id;
5030                         } else {
5031                                 /* Reset for inner layer. */
5032                                 next_protocol = 0xff;
5033                         }
5034                         break;
5035                 case RTE_FLOW_ITEM_TYPE_IPV6:
5036                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5037                                                   &item_flags, &tunnel);
5038                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5039                                                            last_item,
5040                                                            ether_type,
5041                                                            &nic_ipv6_mask,
5042                                                            error);
5043                         if (ret < 0)
5044                                 return ret;
5045                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5046                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5047                         if (items->mask != NULL &&
5048                             ((const struct rte_flow_item_ipv6 *)
5049                              items->mask)->hdr.proto) {
5050                                 item_ipv6_proto =
5051                                         ((const struct rte_flow_item_ipv6 *)
5052                                          items->spec)->hdr.proto;
5053                                 next_protocol =
5054                                         ((const struct rte_flow_item_ipv6 *)
5055                                          items->spec)->hdr.proto;
5056                                 next_protocol &=
5057                                         ((const struct rte_flow_item_ipv6 *)
5058                                          items->mask)->hdr.proto;
5059                         } else {
5060                                 /* Reset for inner layer. */
5061                                 next_protocol = 0xff;
5062                         }
5063                         break;
5064                 case RTE_FLOW_ITEM_TYPE_TCP:
5065                         ret = mlx5_flow_validate_item_tcp
5066                                                 (items, item_flags,
5067                                                  next_protocol,
5068                                                  &nic_tcp_mask,
5069                                                  error);
5070                         if (ret < 0)
5071                                 return ret;
5072                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5073                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5074                         break;
5075                 case RTE_FLOW_ITEM_TYPE_UDP:
5076                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5077                                                           next_protocol,
5078                                                           error);
5079                         if (ret < 0)
5080                                 return ret;
5081                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5082                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5083                         break;
5084                 case RTE_FLOW_ITEM_TYPE_GRE:
5085                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5086                                                           next_protocol, error);
5087                         if (ret < 0)
5088                                 return ret;
5089                         gre_item = items;
5090                         last_item = MLX5_FLOW_LAYER_GRE;
5091                         break;
5092                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5093                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5094                                                             next_protocol,
5095                                                             error);
5096                         if (ret < 0)
5097                                 return ret;
5098                         last_item = MLX5_FLOW_LAYER_NVGRE;
5099                         break;
5100                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5101                         ret = mlx5_flow_validate_item_gre_key
5102                                 (items, item_flags, gre_item, error);
5103                         if (ret < 0)
5104                                 return ret;
5105                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5106                         break;
5107                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5108                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5109                                                             error);
5110                         if (ret < 0)
5111                                 return ret;
5112                         last_item = MLX5_FLOW_LAYER_VXLAN;
5113                         break;
5114                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5115                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5116                                                                 item_flags, dev,
5117                                                                 error);
5118                         if (ret < 0)
5119                                 return ret;
5120                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5121                         break;
5122                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5123                         ret = mlx5_flow_validate_item_geneve(items,
5124                                                              item_flags, dev,
5125                                                              error);
5126                         if (ret < 0)
5127                                 return ret;
5128                         last_item = MLX5_FLOW_LAYER_GENEVE;
5129                         break;
5130                 case RTE_FLOW_ITEM_TYPE_MPLS:
5131                         ret = mlx5_flow_validate_item_mpls(dev, items,
5132                                                            item_flags,
5133                                                            last_item, error);
5134                         if (ret < 0)
5135                                 return ret;
5136                         last_item = MLX5_FLOW_LAYER_MPLS;
5137                         break;
5138
5139                 case RTE_FLOW_ITEM_TYPE_MARK:
5140                         ret = flow_dv_validate_item_mark(dev, items, attr,
5141                                                          error);
5142                         if (ret < 0)
5143                                 return ret;
5144                         last_item = MLX5_FLOW_ITEM_MARK;
5145                         break;
5146                 case RTE_FLOW_ITEM_TYPE_META:
5147                         ret = flow_dv_validate_item_meta(dev, items, attr,
5148                                                          error);
5149                         if (ret < 0)
5150                                 return ret;
5151                         last_item = MLX5_FLOW_ITEM_METADATA;
5152                         break;
5153                 case RTE_FLOW_ITEM_TYPE_ICMP:
5154                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5155                                                            next_protocol,
5156                                                            error);
5157                         if (ret < 0)
5158                                 return ret;
5159                         last_item = MLX5_FLOW_LAYER_ICMP;
5160                         break;
5161                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5162                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5163                                                             next_protocol,
5164                                                             error);
5165                         if (ret < 0)
5166                                 return ret;
5167                         item_ipv6_proto = IPPROTO_ICMPV6;
5168                         last_item = MLX5_FLOW_LAYER_ICMP6;
5169                         break;
5170                 case RTE_FLOW_ITEM_TYPE_TAG:
5171                         ret = flow_dv_validate_item_tag(dev, items,
5172                                                         attr, error);
5173                         if (ret < 0)
5174                                 return ret;
5175                         last_item = MLX5_FLOW_ITEM_TAG;
5176                         break;
5177                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5178                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5179                         break;
5180                 case RTE_FLOW_ITEM_TYPE_GTP:
5181                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5182                                                         error);
5183                         if (ret < 0)
5184                                 return ret;
5185                         last_item = MLX5_FLOW_LAYER_GTP;
5186                         break;
5187                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5188                         /* Capacity will be checked in the translate stage. */
5189                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5190                                                             last_item,
5191                                                             ether_type,
5192                                                             &nic_ecpri_mask,
5193                                                             error);
5194                         if (ret < 0)
5195                                 return ret;
5196                         last_item = MLX5_FLOW_LAYER_ECPRI;
5197                         break;
5198                 default:
5199                         return rte_flow_error_set(error, ENOTSUP,
5200                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5201                                                   NULL, "item not supported");
5202                 }
5203                 item_flags |= last_item;
5204         }
5205         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5206                 int type = actions->type;
5207
5208                 if (!mlx5_flow_os_action_supported(type))
5209                         return rte_flow_error_set(error, ENOTSUP,
5210                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5211                                                   actions,
5212                                                   "action not supported");
5213                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5214                         return rte_flow_error_set(error, ENOTSUP,
5215                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5216                                                   actions, "too many actions");
5217                 switch (type) {
5218                 case RTE_FLOW_ACTION_TYPE_VOID:
5219                         break;
5220                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5221                         ret = flow_dv_validate_action_port_id(dev,
5222                                                               action_flags,
5223                                                               actions,
5224                                                               attr,
5225                                                               error);
5226                         if (ret)
5227                                 return ret;
5228                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5229                         ++actions_n;
5230                         break;
5231                 case RTE_FLOW_ACTION_TYPE_FLAG:
5232                         ret = flow_dv_validate_action_flag(dev, action_flags,
5233                                                            attr, error);
5234                         if (ret < 0)
5235                                 return ret;
5236                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5237                                 /* Count all modify-header actions as one. */
5238                                 if (!(action_flags &
5239                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5240                                         ++actions_n;
5241                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5242                                                 MLX5_FLOW_ACTION_MARK_EXT;
5243                         } else {
5244                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5245                                 ++actions_n;
5246                         }
5247                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5248                         break;
5249                 case RTE_FLOW_ACTION_TYPE_MARK:
5250                         ret = flow_dv_validate_action_mark(dev, actions,
5251                                                            action_flags,
5252                                                            attr, error);
5253                         if (ret < 0)
5254                                 return ret;
5255                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5256                                 /* Count all modify-header actions as one. */
5257                                 if (!(action_flags &
5258                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5259                                         ++actions_n;
5260                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5261                                                 MLX5_FLOW_ACTION_MARK_EXT;
5262                         } else {
5263                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5264                                 ++actions_n;
5265                         }
5266                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5267                         break;
5268                 case RTE_FLOW_ACTION_TYPE_SET_META:
5269                         ret = flow_dv_validate_action_set_meta(dev, actions,
5270                                                                action_flags,
5271                                                                attr, error);
5272                         if (ret < 0)
5273                                 return ret;
5274                         /* Count all modify-header actions as one action. */
5275                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5276                                 ++actions_n;
5277                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5278                         rw_act_num += MLX5_ACT_NUM_SET_META;
5279                         break;
5280                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5281                         ret = flow_dv_validate_action_set_tag(dev, actions,
5282                                                               action_flags,
5283                                                               attr, error);
5284                         if (ret < 0)
5285                                 return ret;
5286                         /* Count all modify-header actions as one action. */
5287                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5288                                 ++actions_n;
5289                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5290                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5291                         break;
5292                 case RTE_FLOW_ACTION_TYPE_DROP:
5293                         ret = mlx5_flow_validate_action_drop(action_flags,
5294                                                              attr, error);
5295                         if (ret < 0)
5296                                 return ret;
5297                         action_flags |= MLX5_FLOW_ACTION_DROP;
5298                         ++actions_n;
5299                         break;
5300                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5301                         ret = mlx5_flow_validate_action_queue(actions,
5302                                                               action_flags, dev,
5303                                                               attr, error);
5304                         if (ret < 0)
5305                                 return ret;
5306                         queue_index = ((const struct rte_flow_action_queue *)
5307                                                         (actions->conf))->index;
5308                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5309                         ++actions_n;
5310                         break;
5311                 case RTE_FLOW_ACTION_TYPE_RSS:
5312                         rss = actions->conf;
5313                         ret = mlx5_flow_validate_action_rss(actions,
5314                                                             action_flags, dev,
5315                                                             attr, item_flags,
5316                                                             error);
5317                         if (ret < 0)
5318                                 return ret;
5319                         if (rss != NULL && rss->queue_num)
5320                                 queue_index = rss->queue[0];
5321                         action_flags |= MLX5_FLOW_ACTION_RSS;
5322                         ++actions_n;
5323                         break;
5324                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5325                         ret = mlx5_flow_validate_action_default_miss
5326                                                 (action_flags, attr,
5327                                                  error);
5328                         if (ret < 0)
5329                                 return ret;
5330                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5331                         ++actions_n;
5332                         break;
5333                 case RTE_FLOW_ACTION_TYPE_COUNT:
5334                         ret = flow_dv_validate_action_count(dev, error);
5335                         if (ret < 0)
5336                                 return ret;
5337                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5338                         ++actions_n;
5339                         break;
5340                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5341                         if (flow_dv_validate_action_pop_vlan(dev,
5342                                                              action_flags,
5343                                                              actions,
5344                                                              item_flags, attr,
5345                                                              error))
5346                                 return -rte_errno;
5347                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5348                         ++actions_n;
5349                         break;
5350                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5351                         ret = flow_dv_validate_action_push_vlan(dev,
5352                                                                 action_flags,
5353                                                                 vlan_m,
5354                                                                 actions, attr,
5355                                                                 error);
5356                         if (ret < 0)
5357                                 return ret;
5358                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5359                         ++actions_n;
5360                         break;
5361                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5362                         ret = flow_dv_validate_action_set_vlan_pcp
5363                                                 (action_flags, actions, error);
5364                         if (ret < 0)
5365                                 return ret;
5366                         /* Count PCP with push_vlan command. */
5367                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5368                         break;
5369                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5370                         ret = flow_dv_validate_action_set_vlan_vid
5371                                                 (item_flags, action_flags,
5372                                                  actions, error);
5373                         if (ret < 0)
5374                                 return ret;
5375                         /* Count VID with push_vlan command. */
5376                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5377                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5378                         break;
5379                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5380                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5381                         ret = flow_dv_validate_action_l2_encap(dev,
5382                                                                action_flags,
5383                                                                actions, attr,
5384                                                                error);
5385                         if (ret < 0)
5386                                 return ret;
5387                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5388                         ++actions_n;
5389                         break;
5390                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5391                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5392                         ret = flow_dv_validate_action_decap(dev, action_flags,
5393                                                             attr, error);
5394                         if (ret < 0)
5395                                 return ret;
5396                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5397                         ++actions_n;
5398                         break;
5399                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5400                         ret = flow_dv_validate_action_raw_encap_decap
5401                                 (dev, NULL, actions->conf, attr, &action_flags,
5402                                  &actions_n, error);
5403                         if (ret < 0)
5404                                 return ret;
5405                         break;
5406                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5407                         decap = actions->conf;
5408                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5409                                 ;
5410                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5411                                 encap = NULL;
5412                                 actions--;
5413                         } else {
5414                                 encap = actions->conf;
5415                         }
5416                         ret = flow_dv_validate_action_raw_encap_decap
5417                                            (dev,
5418                                             decap ? decap : &empty_decap, encap,
5419                                             attr, &action_flags, &actions_n,
5420                                             error);
5421                         if (ret < 0)
5422                                 return ret;
5423                         break;
5424                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5425                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5426                         ret = flow_dv_validate_action_modify_mac(action_flags,
5427                                                                  actions,
5428                                                                  item_flags,
5429                                                                  error);
5430                         if (ret < 0)
5431                                 return ret;
5432                         /* Count all modify-header actions as one action. */
5433                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5434                                 ++actions_n;
5435                         action_flags |= actions->type ==
5436                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5437                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5438                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5439                         /*
5440                          * Even though the source and destination MAC
5441                          * addresses overlap in the 4B-aligned header, the
5442                          * convert function handles them separately and
5443                          * creates 4 SW actions: 2 are added each time,
5444                          * no matter how many address bytes are set.
5445                          */
5446                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5447                         break;
5448                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5449                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5450                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5451                                                                   actions,
5452                                                                   item_flags,
5453                                                                   error);
5454                         if (ret < 0)
5455                                 return ret;
5456                         /* Count all modify-header actions as one action. */
5457                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5458                                 ++actions_n;
5459                         action_flags |= actions->type ==
5460                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5461                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5462                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5463                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5464                         break;
5465                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5466                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5467                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5468                                                                   actions,
5469                                                                   item_flags,
5470                                                                   error);
5471                         if (ret < 0)
5472                                 return ret;
5473                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5474                                 return rte_flow_error_set(error, ENOTSUP,
5475                                         RTE_FLOW_ERROR_TYPE_ACTION,
5476                                         actions,
5477                                         "Can't change header "
5478                                         "with ICMPv6 proto");
5479                         /* Count all modify-header actions as one action. */
5480                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5481                                 ++actions_n;
5482                         action_flags |= actions->type ==
5483                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5484                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5485                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5486                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5487                         break;
5488                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5489                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5490                         ret = flow_dv_validate_action_modify_tp(action_flags,
5491                                                                 actions,
5492                                                                 item_flags,
5493                                                                 error);
5494                         if (ret < 0)
5495                                 return ret;
5496                         /* Count all modify-header actions as one action. */
5497                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5498                                 ++actions_n;
5499                         action_flags |= actions->type ==
5500                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5501                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5502                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5503                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5504                         break;
5505                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5506                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5507                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5508                                                                  actions,
5509                                                                  item_flags,
5510                                                                  error);
5511                         if (ret < 0)
5512                                 return ret;
5513                         /* Count all modify-header actions as one action. */
5514                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5515                                 ++actions_n;
5516                         action_flags |= actions->type ==
5517                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5518                                                 MLX5_FLOW_ACTION_SET_TTL :
5519                                                 MLX5_FLOW_ACTION_DEC_TTL;
5520                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5521                         break;
5522                 case RTE_FLOW_ACTION_TYPE_JUMP:
5523                         ret = flow_dv_validate_action_jump(actions,
5524                                                            action_flags,
5525                                                            attr, external,
5526                                                            error);
5527                         if (ret)
5528                                 return ret;
5529                         ++actions_n;
5530                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5531                         break;
5532                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5533                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5534                         ret = flow_dv_validate_action_modify_tcp_seq
5535                                                                 (action_flags,
5536                                                                  actions,
5537                                                                  item_flags,
5538                                                                  error);
5539                         if (ret < 0)
5540                                 return ret;
5541                         /* Count all modify-header actions as one action. */
5542                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5543                                 ++actions_n;
5544                         action_flags |= actions->type ==
5545                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5546                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5547                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5548                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5549                         break;
5550                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5551                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5552                         ret = flow_dv_validate_action_modify_tcp_ack
5553                                                                 (action_flags,
5554                                                                  actions,
5555                                                                  item_flags,
5556                                                                  error);
5557                         if (ret < 0)
5558                                 return ret;
5559                         /* Count all modify-header actions as one action. */
5560                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5561                                 ++actions_n;
5562                         action_flags |= actions->type ==
5563                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5564                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5565                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5566                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5567                         break;
5568                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5569                         break;
5570                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5571                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5572                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5573                         break;
5574                 case RTE_FLOW_ACTION_TYPE_METER:
5575                         ret = mlx5_flow_validate_action_meter(dev,
5576                                                               action_flags,
5577                                                               actions, attr,
5578                                                               error);
5579                         if (ret < 0)
5580                                 return ret;
5581                         action_flags |= MLX5_FLOW_ACTION_METER;
5582                         ++actions_n;
5583                         /* Meter action will add one more TAG action. */
5584                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5585                         break;
5586                 case RTE_FLOW_ACTION_TYPE_AGE:
5587                         ret = flow_dv_validate_action_age(action_flags,
5588                                                           actions, dev,
5589                                                           error);
5590                         if (ret < 0)
5591                                 return ret;
5592                         action_flags |= MLX5_FLOW_ACTION_AGE;
5593                         ++actions_n;
5594                         break;
5595                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5596                         ret = flow_dv_validate_action_modify_ipv4_dscp
5597                                                          (action_flags,
5598                                                           actions,
5599                                                           item_flags,
5600                                                           error);
5601                         if (ret < 0)
5602                                 return ret;
5603                         /* Count all modify-header actions as one action. */
5604                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5605                                 ++actions_n;
5606                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5607                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5608                         break;
5609                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5610                         ret = flow_dv_validate_action_modify_ipv6_dscp
5611                                                                 (action_flags,
5612                                                                  actions,
5613                                                                  item_flags,
5614                                                                  error);
5615                         if (ret < 0)
5616                                 return ret;
5617                         /* Count all modify-header actions as one action. */
5618                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5619                                 ++actions_n;
5620                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5621                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5622                         break;
5623                 default:
5624                         return rte_flow_error_set(error, ENOTSUP,
5625                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5626                                                   actions,
5627                                                   "action not supported");
5628                 }
5629         }
5630         /*
5631          * Validate the drop action mutual exclusion with other actions.
5632          * Drop action is mutually-exclusive with any other action, except for
5633          * Count action.
5634          */
5635         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
5636             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
5637                 return rte_flow_error_set(error, EINVAL,
5638                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5639                                           "Drop action is mutually-exclusive "
5640                                           "with any other action, except for "
5641                                           "Count action");
5642         /* Eswitch has a few restrictions on using items and actions. */
5643         if (attr->transfer) {
5644                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5645                     action_flags & MLX5_FLOW_ACTION_FLAG)
5646                         return rte_flow_error_set(error, ENOTSUP,
5647                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5648                                                   NULL,
5649                                                   "unsupported action FLAG");
5650                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5651                     action_flags & MLX5_FLOW_ACTION_MARK)
5652                         return rte_flow_error_set(error, ENOTSUP,
5653                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5654                                                   NULL,
5655                                                   "unsupported action MARK");
5656                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
5657                         return rte_flow_error_set(error, ENOTSUP,
5658                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5659                                                   NULL,
5660                                                   "unsupported action QUEUE");
5661                 if (action_flags & MLX5_FLOW_ACTION_RSS)
5662                         return rte_flow_error_set(error, ENOTSUP,
5663                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5664                                                   NULL,
5665                                                   "unsupported action RSS");
5666                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5667                         return rte_flow_error_set(error, EINVAL,
5668                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5669                                                   actions,
5670                                                   "no fate action is found");
5671         } else {
5672                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
5673                         return rte_flow_error_set(error, EINVAL,
5674                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5675                                                   actions,
5676                                                   "no fate action is found");
5677         }
5678         /* Continue validation for Xcap actions. */
5679         if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
5680             mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5681                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5682                     MLX5_FLOW_XCAP_ACTIONS)
5683                         return rte_flow_error_set(error, ENOTSUP,
5684                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5685                                                   NULL, "encap and decap "
5686                                                   "combination isn't supported");
5687                 if (!attr->transfer && attr->ingress && (action_flags &
5688                                                         MLX5_FLOW_ACTION_ENCAP))
5689                         return rte_flow_error_set(error, ENOTSUP,
5690                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5691                                                   NULL, "encap is not supported"
5692                                                   " for ingress traffic");
5693         }
5694         /* Hairpin flow will add one more TAG action. */
5695         if (hairpin > 0)
5696                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5697         /* Extra metadata enabled: one more TAG action will be added. */
5698         if (dev_conf->dv_flow_en &&
5699             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
5700             mlx5_flow_ext_mreg_supported(dev))
5701                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5702         if ((uint32_t)rw_act_num >
5703                         flow_dv_modify_hdr_action_max(dev, is_root)) {
5704                 return rte_flow_error_set(error, ENOTSUP,
5705                                           RTE_FLOW_ERROR_TYPE_ACTION,
5706                                           NULL, "too many header modify"
5707                                           " actions to support");
5708         }
5709         return 0;
5710 }
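
/*
 * Illustrative sketch only (not used by the driver): the kind of
 * application-level rule that exercises the validation above - an outer
 * Ethernet item carrying the eCPRI Ethertype 0xAEFE, an eCPRI item and a
 * QUEUE fate action. The queue index is a placeholder assumption.
 */
static __rte_unused struct rte_flow *
flow_dv_example_ecpri_rule(uint16_t port_id, struct rte_flow_error *error)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        static const struct rte_flow_item_eth eth_spec = {
                .type = RTE_BE16(0xAEFE), /* eCPRI over Ether layer. */
        };
        static const struct rte_flow_item_eth eth_mask = {
                .type = RTE_BE16(0xffff),
        };
        static const struct rte_flow_item_ecpri ecpri_spec = {
                /* IQ data message type, assuming rte_ecpri.h layout. */
                .hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
        };
        const struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .spec = &eth_spec,
                        .mask = &eth_mask,
                },
                {
                        .type = RTE_FLOW_ITEM_TYPE_ECPRI,
                        .spec = &ecpri_spec,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
}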
5711
5712 /**
5713  * Internal preparation function. Allocates the DV flow structure,
5714  * whose size is constant.
5715  *
5716  * @param[in] dev
5717  *   Pointer to the rte_eth_dev structure.
5718  * @param[in] attr
5719  *   Pointer to the flow attributes.
5720  * @param[in] items
5721  *   Pointer to the list of items.
5722  * @param[in] actions
5723  *   Pointer to the list of actions.
5724  * @param[out] error
5725  *   Pointer to the error structure.
5726  *
5727  * @return
5728  *   Pointer to mlx5_flow object on success,
5729  *   otherwise NULL and rte_errno is set.
5730  */
5731 static struct mlx5_flow *
5732 flow_dv_prepare(struct rte_eth_dev *dev,
5733                 const struct rte_flow_attr *attr __rte_unused,
5734                 const struct rte_flow_item items[] __rte_unused,
5735                 const struct rte_flow_action actions[] __rte_unused,
5736                 struct rte_flow_error *error)
5737 {
5738         uint32_t handle_idx = 0;
5739         struct mlx5_flow *dev_flow;
5740         struct mlx5_flow_handle *dev_handle;
5741         struct mlx5_priv *priv = dev->data->dev_private;
5742
5743         /* Guard against overrunning the temporary device flow array. */
5744         if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
5745                 rte_flow_error_set(error, ENOSPC,
5746                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5747                                    "no free temporary device flow");
5748                 return NULL;
5749         }
5750         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
5751                                    &handle_idx);
5752         if (!dev_handle) {
5753                 rte_flow_error_set(error, ENOMEM,
5754                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5755                                    "not enough memory to create flow handle");
5756                 return NULL;
5757         }
5758         /* Multi-threading is not supported. */
5759         dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
5760         dev_flow->handle = dev_handle;
5761         dev_flow->handle_idx = handle_idx;
5762         /*
5763          * Some old rdma-core releases check the length of the matching
5764          * parameter before continuing, and the check expects the length
5765          * without the misc4 param. If the flow has misc4 support, the
5766          * length must be adjusted accordingly. Each param member is
5767          * naturally aligned on a 64B boundary.
5768          */
5769         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
5770                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
5771         /*
5772          * The matching value must be cleared to 0 before use. It used to
5773          * be cleared automatically by the rte_*alloc API; the explicit
5774          * memset takes almost the same time as before.
5775          */
5776         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
5777         dev_flow->ingress = attr->ingress;
5778         dev_flow->dv.transfer = attr->transfer;
5779         return dev_flow;
5780 }
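
/*
 * Worked example for the size computed above (a sketch assuming the
 * canonical PRM layout of six 64B match-set members: outer headers, misc,
 * inner headers, misc2, misc3 and misc4):
 *
 *   MLX5_ST_SZ_BYTES(fte_match_param)     = 6 * 64 = 384B
 *   MLX5_ST_SZ_BYTES(fte_match_set_misc4) =          64B
 *   dev_flow->dv.value.size               = 384 - 64 = 320B
 *
 * which is the length that old rdma-core releases expect to check.
 */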
5781
5782 #ifdef RTE_LIBRTE_MLX5_DEBUG
5783 /**
5784  * Sanity check for match mask and value. Similar to check_valid_spec() in
5785  * Sanity check for match mask and value. Similar to check_valid_spec() in
5786  * the kernel driver. Failure is returned if value has an unmasked bit set.
5787  *
5788  * @param match_mask
5789  *   Pointer to the match mask buffer.
5790  * @param match_value
5791  *   Pointer to the match value buffer.
5792  * @return
5793  *   0 if valid, -EINVAL otherwise.
5794  */
5795 static int
5796 flow_dv_check_valid_spec(void *match_mask, void *match_value)
5797 {
5798         uint8_t *m = match_mask;
5799         uint8_t *v = match_value;
5800         unsigned int i;
5801
5802         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
5803                 if (v[i] & ~m[i]) {
5804                         DRV_LOG(ERR,
5805                                 "match_value differs from match_criteria"
5806                                 " %p[%u] != %p[%u]",
5807                                 match_value, i, match_mask, i);
5808                         return -EINVAL;
5809                 }
5810         }
5811         return 0;
5812 }
5813 #endif
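
#ifdef RTE_LIBRTE_MLX5_DEBUG
/*
 * Minimal usage sketch (not called anywhere): a single value bit that the
 * mask does not cover makes the check above fail.
 */
static __rte_unused void
flow_dv_check_valid_spec_example(void)
{
        uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { [0] = 0x0f };
        uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { [0] = 0x10 };

        /* Bit 4 of value[0] is unmasked: 0x10 & ~0x0f != 0 -> -EINVAL. */
        MLX5_ASSERT(flow_dv_check_valid_spec(mask, value) == -EINVAL);
}
#endif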
5814
5815 /**
5816  * Add match of ip_version.
5817  *
5818  * @param[in] group
5819  *   Flow group.
5820  * @param[in] headers_v
5821  *   Values header pointer.
5822  * @param[in] headers_m
5823  *   Masks header pointer.
5824  * @param[in] ip_version
5825  *   The IP version to set.
5826  */
5827 static inline void
5828 flow_dv_set_match_ip_version(uint32_t group,
5829                              void *headers_v,
5830                              void *headers_m,
5831                              uint8_t ip_version)
5832 {
5833         if (group == 0)
5834                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5835         else
5836                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
5837                          ip_version);
5838         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
5839         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
5840         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
5841 }
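
/*
 * Usage sketch (illustrative only, never called): matching outer IPv4 on
 * a non-root table. On the root table (group 0) the function forces the
 * full 4-bit mask instead.
 */
static __rte_unused void
flow_dv_set_match_ip_version_example(void *matcher, void *key)
{
        void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

        /* Non-root group: mask and value both carry the version (4). */
        flow_dv_set_match_ip_version(1, headers_v, headers_m, 4);
}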
5842
5843 /**
5844  * Add Ethernet item to matcher and to the value.
5845  *
5846  * @param[in, out] matcher
5847  *   Flow matcher.
5848  * @param[in, out] key
5849  *   Flow matcher value.
5850  * @param[in] item
5851  *   Flow pattern to translate.
5852  * @param[in] inner
5853  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
5854  */
5855 static void
5856 flow_dv_translate_item_eth(void *matcher, void *key,
5857                            const struct rte_flow_item *item, int inner,
5858                            uint32_t group)
5859 {
5860         const struct rte_flow_item_eth *eth_m = item->mask;
5861         const struct rte_flow_item_eth *eth_v = item->spec;
5862         const struct rte_flow_item_eth nic_mask = {
5863                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5864                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5865                 .type = RTE_BE16(0xffff),
5866         };
5867         void *headers_m;
5868         void *headers_v;
5869         char *l24_v;
5870         unsigned int i;
5871
5872         if (!eth_v)
5873                 return;
5874         if (!eth_m)
5875                 eth_m = &nic_mask;
5876         if (inner) {
5877                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5878                                          inner_headers);
5879                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5880         } else {
5881                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5882                                          outer_headers);
5883                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5884         }
5885         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
5886                &eth_m->dst, sizeof(eth_m->dst));
5887         /* The value must be in the range of the mask. */
5888         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
5889         for (i = 0; i < sizeof(eth_m->dst); ++i)
5890                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
5891         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
5892                &eth_m->src, sizeof(eth_m->src));
5893         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
5894         /* The value must be in the range of the mask. */
5895         for (i = 0; i < sizeof(eth_m->src); ++i)
5896                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
5897         if (eth_v->type) {
5898                 /* When ethertype is present, set mask for tagged VLAN. */
5899                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5900                 /* Set value for tagged VLAN if ethertype is 802.1Q. */
5901                 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
5902                     eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
5903                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
5904                                  1);
5905                         /* Return here to avoid setting match on ethertype. */
5906                         return;
5907                 }
5908         }
5909         /*
5910          * HW supports match on one Ethertype, the Ethertype following the last
5911          * VLAN tag of the packet (see PRM).
5912          * Set match on ethertype only if ETH header is not followed by VLAN.
5913          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
5914          * ethertype, and use ip_version field instead.
5915          * eCPRI over Ether layer will use type value 0xAEFE.
5916          * eCPRI over the Ether layer uses type value 0xAEFE.
5917         if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
5918             eth_m->type == 0xFFFF) {
5919                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
5920         } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
5921                    eth_m->type == 0xFFFF) {
5922                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
5923         } else {
5924                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5925                          rte_be_to_cpu_16(eth_m->type));
5926                 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5927                                      ethertype);
5928                 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
5929         }
5930 }
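
/*
 * Illustrative spec/mask pair (placeholders, not used by the driver): a
 * fully-masked IPv4 Ethertype takes the ip_version path above, so no
 * ethertype match is programmed into the matcher.
 */
static const struct rte_flow_item_eth flow_dv_example_eth_ipv4_spec
        __rte_unused = {
        .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item_eth flow_dv_example_eth_ipv4_mask
        __rte_unused = {
        .type = RTE_BE16(0xffff),
};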
5931
5932 /**
5933  * Add VLAN item to matcher and to the value.
5934  *
5935  * @param[in, out] dev_flow
5936  *   Flow descriptor.
5937  * @param[in, out] matcher
5938  *   Flow matcher.
5939  * @param[in, out] key
5940  *   Flow matcher value.
5941  * @param[in] item
5942  *   Flow pattern to translate.
5943  * @param[in] inner
5944  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
5945  */
5946 static void
5947 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
5948                             void *matcher, void *key,
5949                             const struct rte_flow_item *item,
5950                             int inner, uint32_t group)
5951 {
5952         const struct rte_flow_item_vlan *vlan_m = item->mask;
5953         const struct rte_flow_item_vlan *vlan_v = item->spec;
5954         void *headers_m;
5955         void *headers_v;
5956         uint16_t tci_m;
5957         uint16_t tci_v;
5958
5959         if (inner) {
5960                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5961                                          inner_headers);
5962                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5963         } else {
5964                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5965                                          outer_headers);
5966                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5967                 /*
5968                  * This is a workaround: masks are not supported
5969                  * and have been pre-validated.
5970                  */
5971                 if (vlan_v)
5972                         dev_flow->handle->vf_vlan.tag =
5973                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
5974         }
5975         /*
5976          * When a VLAN item exists in the flow, mark the packet as
5977          * tagged, even if TCI is not specified.
5978          */
5979         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5980         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
5981         if (!vlan_v)
5982                 return;
5983         if (!vlan_m)
5984                 vlan_m = &rte_flow_item_vlan_mask;
5985         tci_m = rte_be_to_cpu_16(vlan_m->tci);
5986         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
5987         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
5988         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
5989         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
5990         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
5991         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
5992         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
5993         /*
5994          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
5995          * ethertype, and use ip_version field instead.
5996          */
5997         if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
5998             vlan_m->inner_type == 0xFFFF) {
5999                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6000         } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
6001                    vlan_m->inner_type == 0xFFFF) {
6002                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6003         } else {
6004                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
6005                          rte_be_to_cpu_16(vlan_m->inner_type));
6006                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
6007                          rte_be_to_cpu_16(vlan_m->inner_type &
6008                                           vlan_v->inner_type));
6009         }
6010 }
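
/*
 * Worked TCI example for the split above (MLX5_SET() truncates each value
 * to its field width): tci = 0xb123
 *
 *   first_prio (3 bits)  = 0xb123 >> 13       = 5
 *   first_cfi  (1 bit)   = (0xb123 >> 12) & 1 = 1
 *   first_vid  (12 bits) = 0xb123 & 0x0fff    = 0x123
 */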
6011
6012 /**
6013  * Add IPV4 item to matcher and to the value.
6014  *
6015  * @param[in, out] matcher
6016  *   Flow matcher.
6017  * @param[in, out] key
6018  *   Flow matcher value.
6019  * @param[in] item
6020  *   Flow pattern to translate.
6021  * @param[in] item_flags
6022  *   Bit-fields that holds the items detected until now.
6023  * @param[in] inner
6024  *   Item is inner pattern.
6025  * @param[in] group
6026  *   The group to insert the rule.
6027  */
6028 static void
6029 flow_dv_translate_item_ipv4(void *matcher, void *key,
6030                             const struct rte_flow_item *item,
6031                             const uint64_t item_flags,
6032                             int inner, uint32_t group)
6033 {
6034         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6035         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6036         const struct rte_flow_item_ipv4 nic_mask = {
6037                 .hdr = {
6038                         .src_addr = RTE_BE32(0xffffffff),
6039                         .dst_addr = RTE_BE32(0xffffffff),
6040                         .type_of_service = 0xff,
6041                         .next_proto_id = 0xff,
6042                         .time_to_live = 0xff,
6043                 },
6044         };
6045         void *headers_m;
6046         void *headers_v;
6047         char *l24_m;
6048         char *l24_v;
6049         uint8_t tos;
6050
6051         if (inner) {
6052                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6053                                          inner_headers);
6054                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6055         } else {
6056                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6057                                          outer_headers);
6058                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6059         }
6060         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6061         /*
6062          * On outer header (which must contain L2), or inner header with L2,
6063          * set cvlan_tag mask bit to mark this packet as untagged.
6064          * This should be done even if item->spec is empty.
6065          */
6066         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
6067                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6068         if (!ipv4_v)
6069                 return;
6070         if (!ipv4_m)
6071                 ipv4_m = &nic_mask;
6072         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6073                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6074         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6075                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6076         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6077         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6078         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6079                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6080         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6081                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6082         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6083         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6084         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6085         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6086                  ipv4_m->hdr.type_of_service);
6087         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6088         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6089                  ipv4_m->hdr.type_of_service >> 2);
6090         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6091         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6092                  ipv4_m->hdr.next_proto_id);
6093         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6094                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6095         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6096                  ipv4_m->hdr.time_to_live);
6097         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6098                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6099 }
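
/*
 * A usage sketch with illustrative values (hypothetical names, not part
 * of the driver): the translation above writes the mask into the matcher
 * side and (spec & mask) into the key side, so bits cleared in the mask
 * can never influence matching.
 */
static const struct rte_flow_item_ipv4 ipv4_sketch_spec = {
        .hdr = {
                .dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
                .next_proto_id = IPPROTO_UDP,
        },
};
static const struct rte_flow_item_ipv4 ipv4_sketch_mask = {
        .hdr = {
                .dst_addr = RTE_BE32(0xffffffff),
                .next_proto_id = 0xff,
        },
};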
6100
6101 /**
6102  * Add IPV6 item to matcher and to the value.
6103  *
6104  * @param[in, out] matcher
6105  *   Flow matcher.
6106  * @param[in, out] key
6107  *   Flow matcher value.
6108  * @param[in] item
6109  *   Flow pattern to translate.
6110  * @param[in] item_flags
6111  *   Bit-fields that hold the items detected until now.
6112  * @param[in] inner
6113  *   Item is inner pattern.
6114  * @param[in] group
6115  *   The group to insert the rule.
6116  */
6117 static void
6118 flow_dv_translate_item_ipv6(void *matcher, void *key,
6119                             const struct rte_flow_item *item,
6120                             const uint64_t item_flags,
6121                             int inner, uint32_t group)
6122 {
6123         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6124         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6125         const struct rte_flow_item_ipv6 nic_mask = {
6126                 .hdr = {
6127                         .src_addr =
6128                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6129                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6130                         .dst_addr =
6131                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6132                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6133                         .vtc_flow = RTE_BE32(0xffffffff),
6134                         .proto = 0xff,
6135                         .hop_limits = 0xff,
6136                 },
6137         };
6138         void *headers_m;
6139         void *headers_v;
6140         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6141         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6142         char *l24_m;
6143         char *l24_v;
6144         uint32_t vtc_m;
6145         uint32_t vtc_v;
6146         int i;
6147         int size;
6148
6149         if (inner) {
6150                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6151                                          inner_headers);
6152                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6153         } else {
6154                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6155                                          outer_headers);
6156                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6157         }
6158         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6159         /*
6160          * On outer header (which must contain L2), or inner header with L2,
6161          * set cvlan_tag mask bit to mark this packet as untagged.
6162          * This should be done even if item->spec is empty.
6163          */
6164         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
6165                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6166         if (!ipv6_v)
6167                 return;
6168         if (!ipv6_m)
6169                 ipv6_m = &nic_mask;
6170         size = sizeof(ipv6_m->hdr.dst_addr);
6171         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6172                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6173         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6174                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6175         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6176         for (i = 0; i < size; ++i)
6177                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6178         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6179                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6180         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6181                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6182         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6183         for (i = 0; i < size; ++i)
6184                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6185         /* TOS. */
6186         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6187         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6188         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6189         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6190         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6191         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6192         /* Label. */
6193         if (inner) {
6194                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6195                          vtc_m);
6196                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6197                          vtc_v);
6198         } else {
6199                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6200                          vtc_m);
6201                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6202                          vtc_v);
6203         }
6204         /* Protocol. */
6205         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6206                  ipv6_m->hdr.proto);
6207         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6208                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6209         /* Hop limit. */
6210         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6211                  ipv6_m->hdr.hop_limits);
6212         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6213                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6214 }
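
/*
 * A helper sketch (hypothetical, for illustration) of the vtc_flow layout
 * relied on above (RFC 8200): bits 31:28 hold the version, bits 27:20 the
 * traffic class (DSCP in 27:22, ECN in 21:20) and bits 19:0 the flow
 * label. MLX5_SET() truncates to the destination field width, which is
 * why the shifts above need no explicit masking.
 */
static inline void
ipv6_vtc_split_sketch(uint32_t vtc, uint8_t *dscp, uint8_t *ecn,
                      uint32_t *label)
{
        *ecn = (vtc >> 20) & 0x3;
        *dscp = (vtc >> 22) & 0x3f;
        *label = vtc & 0xfffff;
}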
6215
6216 /**
6217  * Add TCP item to matcher and to the value.
6218  *
6219  * @param[in, out] matcher
6220  *   Flow matcher.
6221  * @param[in, out] key
6222  *   Flow matcher value.
6223  * @param[in] item
6224  *   Flow pattern to translate.
6225  * @param[in] inner
6226  *   Item is inner pattern.
6227  */
6228 static void
6229 flow_dv_translate_item_tcp(void *matcher, void *key,
6230                            const struct rte_flow_item *item,
6231                            int inner)
6232 {
6233         const struct rte_flow_item_tcp *tcp_m = item->mask;
6234         const struct rte_flow_item_tcp *tcp_v = item->spec;
6235         void *headers_m;
6236         void *headers_v;
6237
6238         if (inner) {
6239                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6240                                          inner_headers);
6241                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6242         } else {
6243                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6244                                          outer_headers);
6245                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6246         }
6247         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6248         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6249         if (!tcp_v)
6250                 return;
6251         if (!tcp_m)
6252                 tcp_m = &rte_flow_item_tcp_mask;
6253         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6254                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6255         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6256                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6257         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6258                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6259         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6260                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6261         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6262                  tcp_m->hdr.tcp_flags);
6263         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6264                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6265 }
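
/*
 * A usage sketch with illustrative values (hypothetical names): matching
 * TCP SYN segments to port 80; the translation above forces ip_protocol
 * to IPPROTO_TCP regardless of the spec.
 */
static const struct rte_flow_item_tcp tcp_sketch_spec = {
        .hdr = {
                .dst_port = RTE_BE16(80),
                .tcp_flags = 0x02,      /* SYN */
        },
};
static const struct rte_flow_item_tcp tcp_sketch_mask = {
        .hdr = {
                .dst_port = RTE_BE16(0xffff),
                .tcp_flags = 0xff,
        },
};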
6266
6267 /**
6268  * Add UDP item to matcher and to the value.
6269  *
6270  * @param[in, out] matcher
6271  *   Flow matcher.
6272  * @param[in, out] key
6273  *   Flow matcher value.
6274  * @param[in] item
6275  *   Flow pattern to translate.
6276  * @param[in] inner
6277  *   Item is inner pattern.
6278  */
6279 static void
6280 flow_dv_translate_item_udp(void *matcher, void *key,
6281                            const struct rte_flow_item *item,
6282                            int inner)
6283 {
6284         const struct rte_flow_item_udp *udp_m = item->mask;
6285         const struct rte_flow_item_udp *udp_v = item->spec;
6286         void *headers_m;
6287         void *headers_v;
6288
6289         if (inner) {
6290                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6291                                          inner_headers);
6292                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6293         } else {
6294                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6295                                          outer_headers);
6296                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6297         }
6298         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6299         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6300         if (!udp_v)
6301                 return;
6302         if (!udp_m)
6303                 udp_m = &rte_flow_item_udp_mask;
6304         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6305                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6306         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6307                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6308         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6309                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6310         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6311                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6312 }
6313
6314 /**
6315  * Add GRE optional Key item to matcher and to the value.
6316  *
6317  * @param[in, out] matcher
6318  *   Flow matcher.
6319  * @param[in, out] key
6320  *   Flow matcher value.
6321  * @param[in] item
6322  *   Flow pattern to translate.
6325  */
6326 static void
6327 flow_dv_translate_item_gre_key(void *matcher, void *key,
6328                                const struct rte_flow_item *item)
6329 {
6330         const rte_be32_t *key_m = item->mask;
6331         const rte_be32_t *key_v = item->spec;
6332         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6333         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6334         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6335
6336         /* GRE K bit must be on and should already be validated */
6337         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6338         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6339         if (!key_v)
6340                 return;
6341         if (!key_m)
6342                 key_m = &gre_key_default_mask;
6343         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6344                  rte_be_to_cpu_32(*key_m) >> 8);
6345         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6346                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6347         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6348                  rte_be_to_cpu_32(*key_m) & 0xFF);
6349         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6350                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6351 }
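
/*
 * Illustration (hypothetical helper) of the split performed above: the
 * device exposes the 32-bit GRE key as a 24-bit high part and an 8-bit
 * low part.
 */
static inline void
gre_key_split_sketch(uint32_t key, uint32_t *key_h, uint32_t *key_l)
{
        *key_h = key >> 8;      /* upper 24 bits -> gre_key_h */
        *key_l = key & 0xff;    /* lower 8 bits -> gre_key_l */
}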
6352
6353 /**
6354  * Add GRE item to matcher and to the value.
6355  *
6356  * @param[in, out] matcher
6357  *   Flow matcher.
6358  * @param[in, out] key
6359  *   Flow matcher value.
6360  * @param[in] item
6361  *   Flow pattern to translate.
6362  * @param[in] inner
6363  *   Item is inner pattern.
6364  */
6365 static void
6366 flow_dv_translate_item_gre(void *matcher, void *key,
6367                            const struct rte_flow_item *item,
6368                            int inner)
6369 {
6370         const struct rte_flow_item_gre *gre_m = item->mask;
6371         const struct rte_flow_item_gre *gre_v = item->spec;
6372         void *headers_m;
6373         void *headers_v;
6374         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6375         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6376         struct {
6377                 union {
6378                         __extension__
6379                         struct {
6380                                 uint16_t version:3;
6381                                 uint16_t rsvd0:9;
6382                                 uint16_t s_present:1;
6383                                 uint16_t k_present:1;
6384                                 uint16_t rsvd_bit1:1;
6385                                 uint16_t c_present:1;
6386                         };
6387                         uint16_t value;
6388                 };
6389         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6390
6391         if (inner) {
6392                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6393                                          inner_headers);
6394                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6395         } else {
6396                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6397                                          outer_headers);
6398                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6399         }
6400         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6401         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6402         if (!gre_v)
6403                 return;
6404         if (!gre_m)
6405                 gre_m = &rte_flow_item_gre_mask;
6406         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6407                  rte_be_to_cpu_16(gre_m->protocol));
6408         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6409                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6410         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6411         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6412         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6413                  gre_crks_rsvd0_ver_m.c_present);
6414         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6415                  gre_crks_rsvd0_ver_v.c_present &
6416                  gre_crks_rsvd0_ver_m.c_present);
6417         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6418                  gre_crks_rsvd0_ver_m.k_present);
6419         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6420                  gre_crks_rsvd0_ver_v.k_present &
6421                  gre_crks_rsvd0_ver_m.k_present);
6422         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6423                  gre_crks_rsvd0_ver_m.s_present);
6424         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6425                  gre_crks_rsvd0_ver_v.s_present &
6426                  gre_crks_rsvd0_ver_m.s_present);
6427 }
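
/*
 * Equivalent mask-based decoding of c_rsvd0_ver (hypothetical helper; CPU
 * order after rte_be_to_cpu_16()): per RFC 2890 the C bit is bit 15, K is
 * bit 13 and S is bit 12, matching the bit-field union above on
 * little-endian hosts.
 */
static inline void
gre_flags_decode_sketch(uint16_t v, int *c, int *k, int *s)
{
        *c = !!(v & 0x8000);
        *k = !!(v & 0x2000);
        *s = !!(v & 0x1000);
}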
6428
6429 /**
6430  * Add NVGRE item to matcher and to the value.
6431  *
6432  * @param[in, out] matcher
6433  *   Flow matcher.
6434  * @param[in, out] key
6435  *   Flow matcher value.
6436  * @param[in] item
6437  *   Flow pattern to translate.
6438  * @param[in] inner
6439  *   Item is inner pattern.
6440  */
6441 static void
6442 flow_dv_translate_item_nvgre(void *matcher, void *key,
6443                              const struct rte_flow_item *item,
6444                              int inner)
6445 {
6446         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6447         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6448         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6449         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6450         const char *tni_flow_id_m;
6451         const char *tni_flow_id_v;
6452         char *gre_key_m;
6453         char *gre_key_v;
6454         int size;
6455         int i;
6456
6457         /* For NVGRE, GRE header fields must be set with defined values. */
6458         const struct rte_flow_item_gre gre_spec = {
6459                 .c_rsvd0_ver = RTE_BE16(0x2000),
6460                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
6461         };
6462         const struct rte_flow_item_gre gre_mask = {
6463                 .c_rsvd0_ver = RTE_BE16(0xB000),
6464                 .protocol = RTE_BE16(UINT16_MAX),
6465         };
6466         const struct rte_flow_item gre_item = {
6467                 .spec = &gre_spec,
6468                 .mask = &gre_mask,
6469                 .last = NULL,
6470         };
6471         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
6472         if (!nvgre_v)
6473                 return;
6474         if (!nvgre_m)
6475                 nvgre_m = &rte_flow_item_nvgre_mask;
        tni_flow_id_m = (const char *)nvgre_m->tni;
        tni_flow_id_v = (const char *)nvgre_v->tni;
6476         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
6477         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
6478         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
6479         memcpy(gre_key_m, tni_flow_id_m, size);
6480         for (i = 0; i < size; ++i)
6481                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
6482 }
6483
6484 /**
6485  * Add VXLAN item to matcher and to the value.
6486  *
6487  * @param[in, out] matcher
6488  *   Flow matcher.
6489  * @param[in, out] key
6490  *   Flow matcher value.
6491  * @param[in] item
6492  *   Flow pattern to translate.
6493  * @param[in] inner
6494  *   Item is inner pattern.
6495  */
6496 static void
6497 flow_dv_translate_item_vxlan(void *matcher, void *key,
6498                              const struct rte_flow_item *item,
6499                              int inner)
6500 {
6501         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
6502         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
6503         void *headers_m;
6504         void *headers_v;
6505         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6506         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6507         char *vni_m;
6508         char *vni_v;
6509         uint16_t dport;
6510         int size;
6511         int i;
6512
6513         if (inner) {
6514                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6515                                          inner_headers);
6516                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6517         } else {
6518                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6519                                          outer_headers);
6520                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6521         }
6522         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
6523                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
6524         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6525                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6526                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6527         }
6528         if (!vxlan_v)
6529                 return;
6530         if (!vxlan_m)
6531                 vxlan_m = &rte_flow_item_vxlan_mask;
6532         size = sizeof(vxlan_m->vni);
6533         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
6534         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
6535         memcpy(vni_m, vxlan_m->vni, size);
6536         for (i = 0; i < size; ++i)
6537                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6538 }
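
/*
 * Hypothetical helper showing why the loop above works byte-wise: the
 * 24-bit VNI travels as three network-order bytes in the VXLAN header.
 */
static inline void
vxlan_vni_encode_sketch(uint32_t vni, uint8_t bytes[3])
{
        bytes[0] = (vni >> 16) & 0xff;
        bytes[1] = (vni >> 8) & 0xff;
        bytes[2] = vni & 0xff;
}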
6539
6540 /**
6541  * Add VXLAN-GPE item to matcher and to the value.
6542  *
6543  * @param[in, out] matcher
6544  *   Flow matcher.
6545  * @param[in, out] key
6546  *   Flow matcher value.
6547  * @param[in] item
6548  *   Flow pattern to translate.
6549  * @param[in] inner
6550  *   Item is inner pattern.
6551  */
6552
6553 static void
6554 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
6555                                  const struct rte_flow_item *item, int inner)
6556 {
6557         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
6558         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
6559         void *headers_m;
6560         void *headers_v;
6561         void *misc_m =
6562                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
6563         void *misc_v =
6564                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6565         char *vni_m;
6566         char *vni_v;
6567         uint16_t dport;
6568         int size;
6569         int i;
6570         uint8_t flags_m = 0xff;
6571         uint8_t flags_v = 0xc;
6572
6573         if (inner) {
6574                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6575                                          inner_headers);
6576                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6577         } else {
6578                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6579                                          outer_headers);
6580                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6581         }
6582         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
6583                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
6584         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6585                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6586                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6587         }
6588         if (!vxlan_v)
6589                 return;
6590         if (!vxlan_m)
6591                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
6592         size = sizeof(vxlan_m->vni);
6593         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
6594         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
6595         memcpy(vni_m, vxlan_m->vni, size);
6596         for (i = 0; i < size; ++i)
6597                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6598         if (vxlan_m->flags) {
6599                 flags_m = vxlan_m->flags;
6600                 flags_v = vxlan_v->flags;
6601         }
6602         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
6603         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
6604         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
6605                  vxlan_m->protocol);
6606         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
6607                  vxlan_v->protocol);
6608 }
6609
6610 /**
6611  * Add Geneve item to matcher and to the value.
6612  *
6613  * @param[in, out] matcher
6614  *   Flow matcher.
6615  * @param[in, out] key
6616  *   Flow matcher value.
6617  * @param[in] item
6618  *   Flow pattern to translate.
6619  * @param[in] inner
6620  *   Item is inner pattern.
6621  */
6622
6623 static void
6624 flow_dv_translate_item_geneve(void *matcher, void *key,
6625                               const struct rte_flow_item *item, int inner)
6626 {
6627         const struct rte_flow_item_geneve *geneve_m = item->mask;
6628         const struct rte_flow_item_geneve *geneve_v = item->spec;
6629         void *headers_m;
6630         void *headers_v;
6631         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6632         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6633         uint16_t dport;
6634         uint16_t gbhdr_m;
6635         uint16_t gbhdr_v;
6636         char *vni_m;
6637         char *vni_v;
6638         size_t size, i;
6639
6640         if (inner) {
6641                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6642                                          inner_headers);
6643                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6644         } else {
6645                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6646                                          outer_headers);
6647                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6648         }
6649         dport = MLX5_UDP_PORT_GENEVE;
6650         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6651                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6652                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6653         }
6654         if (!geneve_v)
6655                 return;
6656         if (!geneve_m)
6657                 geneve_m = &rte_flow_item_geneve_mask;
6658         size = sizeof(geneve_m->vni);
6659         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
6660         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
6661         memcpy(vni_m, geneve_m->vni, size);
6662         for (i = 0; i < size; ++i)
6663                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
6664         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
6665                  rte_be_to_cpu_16(geneve_m->protocol));
6666         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
6667                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
6668         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
6669         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
6670         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
6671                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6672         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
6673                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6674         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
6675                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6676         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
6677                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
6678                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6679 }
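
/*
 * Sketch (hypothetical helper) of the ver_opt_len_o_c_rsvd0 layout
 * assumed above, per RFC 8926, in CPU order after rte_be_to_cpu_16():
 * Ver in bits 15:14, Opt Len in bits 13:8, O (OAM) in bit 7, C in bit 6.
 */
static inline void
geneve_hdr_split_sketch(uint16_t w, uint8_t *opt_len, uint8_t *oam)
{
        *opt_len = (w >> 8) & 0x3f;     /* cf. MLX5_GENEVE_OPTLEN_VAL() */
        *oam = (w >> 7) & 0x1;          /* cf. MLX5_GENEVE_OAMF_VAL() */
}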
6680
6681 /**
6682  * Add MPLS item to matcher and to the value.
6683  *
6684  * @param[in, out] matcher
6685  *   Flow matcher.
6686  * @param[in, out] key
6687  *   Flow matcher value.
6688  * @param[in] item
6689  *   Flow pattern to translate.
6690  * @param[in] prev_layer
6691  *   The protocol layer indicated in previous item.
6692  * @param[in] inner
6693  *   Item is inner pattern.
6694  */
6695 static void
6696 flow_dv_translate_item_mpls(void *matcher, void *key,
6697                             const struct rte_flow_item *item,
6698                             uint64_t prev_layer,
6699                             int inner)
6700 {
6701         const uint32_t *in_mpls_m = item->mask;
6702         const uint32_t *in_mpls_v = item->spec;
6703         uint32_t *out_mpls_m = NULL;
6704         uint32_t *out_mpls_v = NULL;
6705         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6706         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6707         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
6708                                      misc_parameters_2);
6709         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6710         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
6711         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6712
6713         switch (prev_layer) {
6714         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6715                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
6716                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6717                          MLX5_UDP_PORT_MPLS);
6718                 break;
6719         case MLX5_FLOW_LAYER_GRE:
6720                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
6721                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6722                          RTE_ETHER_TYPE_MPLS);
6723                 break;
6724         default:
6725                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6726                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6727                          IPPROTO_MPLS);
6728                 break;
6729         }
6730         if (!in_mpls_v)
6731                 return;
6732         if (!in_mpls_m)
6733                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
6734         switch (prev_layer) {
6735         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6736                 out_mpls_m =
6737                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6738                                                  outer_first_mpls_over_udp);
6739                 out_mpls_v =
6740                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6741                                                  outer_first_mpls_over_udp);
6742                 break;
6743         case MLX5_FLOW_LAYER_GRE:
6744                 out_mpls_m =
6745                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6746                                                  outer_first_mpls_over_gre);
6747                 out_mpls_v =
6748                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6749                                                  outer_first_mpls_over_gre);
6750                 break;
6751         default:
6752                 /* Inner MPLS not over GRE is not supported. */
6753                 if (!inner) {
6754                         out_mpls_m =
6755                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6756                                                          misc2_m,
6757                                                          outer_first_mpls);
6758                         out_mpls_v =
6759                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6760                                                          misc2_v,
6761                                                          outer_first_mpls);
6762                 }
6763                 break;
6764         }
6765         if (out_mpls_m && out_mpls_v) {
6766                 *out_mpls_m = *in_mpls_m;
6767                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
6768         }
6769 }
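
/*
 * A usage sketch (hypothetical name, illustrative values): an MPLS over
 * UDP pattern reaches this function with prev_layer ==
 * MLX5_FLOW_LAYER_OUTER_L4_UDP and is steered to the
 * outer_first_mpls_over_udp fields. label_tc_s packs label(20)/TC(3)/S(1),
 * e.g. label 16 with the S bit set:
 */
static const struct rte_flow_item_mpls mpls_sketch_spec = {
        .label_tc_s = { 0x00, 0x01, 0x01 },
};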
6770
6771 /**
6772  * Add metadata register item to matcher
6773  *
6774  * @param[in, out] matcher
6775  *   Flow matcher.
6776  * @param[in, out] key
6777  *   Flow matcher value.
6778  * @param[in] reg_type
6779  *   Type of device metadata register
6780  * @param[in] value
6781  *   Register value
6782  * @param[in] mask
6783  *   Register mask
6784  */
6785 static void
6786 flow_dv_match_meta_reg(void *matcher, void *key,
6787                        enum modify_reg reg_type,
6788                        uint32_t data, uint32_t mask)
6789 {
6790         void *misc2_m =
6791                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
6792         void *misc2_v =
6793                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6794         uint32_t temp;
6795
6796         data &= mask;
6797         switch (reg_type) {
6798         case REG_A:
6799                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
6800                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
6801                 break;
6802         case REG_B:
6803                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
6804                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
6805                 break;
6806         case REG_C_0:
6807                 /*
6808                  * The metadata register C0 field might be divided into
6809                  * source vport index and META item value, we should set
6810                  * this field according to specified mask, not as whole one.
6811                  */
6812                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
6813                 temp |= mask;
6814                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
6815                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
6816                 temp &= ~mask;
6817                 temp |= data;
6818                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
6819                 break;
6820         case REG_C_1:
6821                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
6822                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
6823                 break;
6824         case REG_C_2:
6825                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
6826                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
6827                 break;
6828         case REG_C_3:
6829                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
6830                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
6831                 break;
6832         case REG_C_4:
6833                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
6834                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
6835                 break;
6836         case REG_C_5:
6837                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
6838                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
6839                 break;
6840         case REG_C_6:
6841                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
6842                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
6843                 break;
6844         case REG_C_7:
6845                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
6846                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
6847                 break;
6848         default:
6849                 MLX5_ASSERT(false);
6850                 break;
6851         }
6852 }
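
/*
 * Worked illustration (hypothetical helper) of the REG_C_0 merge above:
 * when C0 is shared, the new mask is OR-ed into the previous one and only
 * the masked part of the value is rewritten, preserving other users' bits.
 */
static inline void
regc0_merge_sketch(uint32_t *m, uint32_t *v, uint32_t mask, uint32_t data)
{
        *m |= mask;                             /* extend the mask */
        *v = (*v & ~mask) | (data & mask);      /* update masked part only */
}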
6853
6854 /**
6855  * Add MARK item to matcher
6856  *
6857  * @param[in] dev
6858  *   The device to configure through.
6859  * @param[in, out] matcher
6860  *   Flow matcher.
6861  * @param[in, out] key
6862  *   Flow matcher value.
6863  * @param[in] item
6864  *   Flow pattern to translate.
6865  */
6866 static void
6867 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
6868                             void *matcher, void *key,
6869                             const struct rte_flow_item *item)
6870 {
6871         struct mlx5_priv *priv = dev->data->dev_private;
6872         const struct rte_flow_item_mark *mark;
6873         uint32_t value;
6874         uint32_t mask;
6875
6876         mark = item->mask ? (const void *)item->mask :
6877                             &rte_flow_item_mark_mask;
6878         mask = mark->id & priv->sh->dv_mark_mask;
6879         mark = (const void *)item->spec;
6880         MLX5_ASSERT(mark);
6881         value = mark->id & priv->sh->dv_mark_mask & mask;
6882         if (mask) {
6883                 enum modify_reg reg;
6884
6885                 /* Get the metadata register index for the mark. */
6886                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
6887                 MLX5_ASSERT(reg > 0);
6888                 if (reg == REG_C_0) {
6890                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6891                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6892
6893                         mask &= msk_c0;
6894                         mask <<= shl_c0;
6895                         value <<= shl_c0;
6896                 }
6897                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6898         }
6899 }
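
/*
 * Sketch of the shift computation above, assuming a hypothetical
 * dv_regc0_mask of 0x00ffff00: rte_bsf32() returns the first set bit (8
 * here), so a MARK id of 0x5 is programmed as value 0x500 under that mask.
 */
static inline uint32_t
mark_to_regc0_sketch(uint32_t id, uint32_t msk_c0)
{
        return (id << rte_bsf32(msk_c0)) & msk_c0;
}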
6900
6901 /**
6902  * Add META item to matcher
6903  *
6904  * @param[in] dev
6905  *   The device to configure through.
6906  * @param[in, out] matcher
6907  *   Flow matcher.
6908  * @param[in, out] key
6909  *   Flow matcher value.
6910  * @param[in] attr
6911  *   Attributes of flow that includes this item.
6912  * @param[in] item
6913  *   Flow pattern to translate.
6914  */
6915 static void
6916 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
6917                             void *matcher, void *key,
6918                             const struct rte_flow_attr *attr,
6919                             const struct rte_flow_item *item)
6920 {
6921         const struct rte_flow_item_meta *meta_m;
6922         const struct rte_flow_item_meta *meta_v;
6923
6924         meta_m = (const void *)item->mask;
6925         if (!meta_m)
6926                 meta_m = &rte_flow_item_meta_mask;
6927         meta_v = (const void *)item->spec;
6928         if (meta_v) {
6929                 int reg;
6930                 uint32_t value = meta_v->data;
6931                 uint32_t mask = meta_m->data;
6932
6933                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
6934                 if (reg < 0)
6935                         return;
6936                 /*
6937                  * In the datapath code there are no endianness
6938                  * conversions, for performance reasons; all
6939                  * pattern conversions are done in rte_flow.
6940                  */
6941                 value = rte_cpu_to_be_32(value);
6942                 mask = rte_cpu_to_be_32(mask);
6943                 if (reg == REG_C_0) {
6944                         struct mlx5_priv *priv = dev->data->dev_private;
6945                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6946                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6947 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6948                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
6949
6950                         value >>= shr_c0;
6951                         mask >>= shr_c0;
6952 #endif
6953                         value <<= shl_c0;
6954                         mask <<= shl_c0;
6955                         MLX5_ASSERT(msk_c0);
6956                         MLX5_ASSERT(!(~msk_c0 & mask));
6957                 }
6958                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6959         }
6960 }
6961
6962 /**
6963  * Add vport metadata Reg C0 item to matcher
6964  *
6965  * @param[in, out] matcher
6966  *   Flow matcher.
6967  * @param[in, out] key
6968  *   Flow matcher value.
6969  * @param[in] value
6970  *   Register value to match.
 * @param[in] mask
 *   Register mask.
6971  */
6972 static void
6973 flow_dv_translate_item_meta_vport(void *matcher, void *key,
6974                                   uint32_t value, uint32_t mask)
6975 {
6976         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
6977 }
6978
6979 /**
6980  * Add tag item to matcher
6981  *
6982  * @param[in] dev
6983  *   The device to configure through.
6984  * @param[in, out] matcher
6985  *   Flow matcher.
6986  * @param[in, out] key
6987  *   Flow matcher value.
6988  * @param[in] item
6989  *   Flow pattern to translate.
6990  */
6991 static void
6992 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
6993                                 void *matcher, void *key,
6994                                 const struct rte_flow_item *item)
6995 {
6996         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
6997         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
6998         uint32_t mask, value;
6999
7000         MLX5_ASSERT(tag_v);
7001         value = tag_v->data;
7002         mask = tag_m ? tag_m->data : UINT32_MAX;
7003         if (tag_v->id == REG_C_0) {
7004                 struct mlx5_priv *priv = dev->data->dev_private;
7005                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7006                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7007
7008                 mask &= msk_c0;
7009                 mask <<= shl_c0;
7010                 value <<= shl_c0;
7011         }
7012         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7013 }
7014
7015 /**
7016  * Add TAG item to matcher
7017  *
7018  * @param[in] dev
7019  *   The device to configure through.
7020  * @param[in, out] matcher
7021  *   Flow matcher.
7022  * @param[in, out] key
7023  *   Flow matcher value.
7024  * @param[in] item
7025  *   Flow pattern to translate.
7026  */
7027 static void
7028 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7029                            void *matcher, void *key,
7030                            const struct rte_flow_item *item)
7031 {
7032         const struct rte_flow_item_tag *tag_v = item->spec;
7033         const struct rte_flow_item_tag *tag_m = item->mask;
7034         enum modify_reg reg;
7035
7036         MLX5_ASSERT(tag_v);
7037         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7038         /* Get the metadata register index for the tag. */
7039         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7040         MLX5_ASSERT(reg > 0);
7041         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7042 }
7043
7044 /**
7045  * Add source vport match to the specified matcher.
7046  *
7047  * @param[in, out] matcher
7048  *   Flow matcher.
7049  * @param[in, out] key
7050  *   Flow matcher value.
7051  * @param[in] port
7052  *   Source vport value to match
7053  * @param[in] mask
7054  *   Mask
7055  */
7056 static void
7057 flow_dv_translate_item_source_vport(void *matcher, void *key,
7058                                     int16_t port, uint16_t mask)
7059 {
7060         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7061         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7062
7063         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7064         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7065 }
7066
7067 /**
7068  * Translate port-id item to eswitch match on port-id.
7069  *
7070  * @param[in] dev
7071  *   The device to configure through.
7072  * @param[in, out] matcher
7073  *   Flow matcher.
7074  * @param[in, out] key
7075  *   Flow matcher value.
7076  * @param[in] item
7077  *   Flow pattern to translate.
7078  *
7079  * @return
7080  *   0 on success, a negative errno value otherwise.
7081  */
7082 static int
7083 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7084                                void *key, const struct rte_flow_item *item)
7085 {
7086         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7087         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7088         struct mlx5_priv *priv;
7089         uint16_t mask, id;
7090
7091         mask = pid_m ? pid_m->id : 0xffff;
7092         id = pid_v ? pid_v->id : dev->data->port_id;
7093         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7094         if (!priv)
7095                 return -rte_errno;
7096         /* Translate to vport field or to metadata, depending on mode. */
7097         if (priv->vport_meta_mask)
7098                 flow_dv_translate_item_meta_vport(matcher, key,
7099                                                   priv->vport_meta_tag,
7100                                                   priv->vport_meta_mask);
7101         else
7102                 flow_dv_translate_item_source_vport(matcher, key,
7103                                                     priv->vport_id, mask);
7104         return 0;
7105 }
7106
7107 /**
7108  * Add ICMP6 item to matcher and to the value.
7109  *
7110  * @param[in, out] matcher
7111  *   Flow matcher.
7112  * @param[in, out] key
7113  *   Flow matcher value.
7114  * @param[in] item
7115  *   Flow pattern to translate.
7116  * @param[in] inner
7117  *   Item is inner pattern.
7118  */
7119 static void
7120 flow_dv_translate_item_icmp6(void *matcher, void *key,
7121                               const struct rte_flow_item *item,
7122                               int inner)
7123 {
7124         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7125         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7126         void *headers_m;
7127         void *headers_v;
7128         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7129                                      misc_parameters_3);
7130         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7131         if (inner) {
7132                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7133                                          inner_headers);
7134                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7135         } else {
7136                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7137                                          outer_headers);
7138                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7139         }
7140         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7141         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7142         if (!icmp6_v)
7143                 return;
7144         if (!icmp6_m)
7145                 icmp6_m = &rte_flow_item_icmp6_mask;
7146         /*
7147          * Force the flow to match only non-fragmented IPv6 ICMPv6 packets.
7148          * If only the protocol is specified, the fragment bit need not match.
7149          */
7150         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7151         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
7152         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7153         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7154                  icmp6_v->type & icmp6_m->type);
7155         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7156         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7157                  icmp6_v->code & icmp6_m->code);
7158 }
7159
7160 /**
7161  * Add ICMP item to matcher and to the value.
7162  *
7163  * @param[in, out] matcher
7164  *   Flow matcher.
7165  * @param[in, out] key
7166  *   Flow matcher value.
7167  * @param[in] item
7168  *   Flow pattern to translate.
7169  * @param[in] inner
7170  *   Item is inner pattern.
7171  */
7172 static void
7173 flow_dv_translate_item_icmp(void *matcher, void *key,
7174                             const struct rte_flow_item *item,
7175                             int inner)
7176 {
7177         const struct rte_flow_item_icmp *icmp_m = item->mask;
7178         const struct rte_flow_item_icmp *icmp_v = item->spec;
7179         void *headers_m;
7180         void *headers_v;
7181         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7182                                      misc_parameters_3);
7183         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7184         if (inner) {
7185                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7186                                          inner_headers);
7187                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7188         } else {
7189                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7190                                          outer_headers);
7191                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7192         }
7193         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7194         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7195         if (!icmp_v)
7196                 return;
7197         if (!icmp_m)
7198                 icmp_m = &rte_flow_item_icmp_mask;
7199         /*
7200          * Force the flow to match only non-fragmented IPv4 ICMP packets.
7201          * If only the protocol is specified, the fragment bit need not match.
7202          */
7203         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7204         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
7205         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7206                  icmp_m->hdr.icmp_type);
7207         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7208                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7209         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7210                  icmp_m->hdr.icmp_code);
7211         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7212                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7213 }
7214
7215 /**
7216  * Add GTP item to matcher and to the value.
7217  *
7218  * @param[in, out] matcher
7219  *   Flow matcher.
7220  * @param[in, out] key
7221  *   Flow matcher value.
7222  * @param[in] item
7223  *   Flow pattern to translate.
7224  * @param[in] inner
7225  *   Item is inner pattern.
7226  */
7227 static void
7228 flow_dv_translate_item_gtp(void *matcher, void *key,
7229                            const struct rte_flow_item *item, int inner)
7230 {
7231         const struct rte_flow_item_gtp *gtp_m = item->mask;
7232         const struct rte_flow_item_gtp *gtp_v = item->spec;
7233         void *headers_m;
7234         void *headers_v;
7235         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7236                                      misc_parameters_3);
7237         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7238         uint16_t dport = RTE_GTPU_UDP_PORT;
7239
7240         if (inner) {
7241                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7242                                          inner_headers);
7243                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7244         } else {
7245                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7246                                          outer_headers);
7247                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7248         }
7249         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7250                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7251                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7252         }
7253         if (!gtp_v)
7254                 return;
7255         if (!gtp_m)
7256                 gtp_m = &rte_flow_item_gtp_mask;
7257         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7258                  gtp_m->v_pt_rsv_flags);
7259         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7260                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7261         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7262         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7263                  gtp_v->msg_type & gtp_m->msg_type);
7264         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7265                  rte_be_to_cpu_32(gtp_m->teid));
7266         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7267                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7268 }
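
/*
 * A usage sketch with illustrative values (hypothetical names): matching
 * GTP-U packets by TEID only; the UDP destination port defaults to
 * RTE_GTPU_UDP_PORT above when not already constrained.
 */
static const struct rte_flow_item_gtp gtp_sketch_spec = {
        .teid = RTE_BE32(1234),
};
static const struct rte_flow_item_gtp gtp_sketch_mask = {
        .teid = RTE_BE32(0xffffffff),
};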
7269
7270 /**
7271  * Add eCPRI item to matcher and to the value.
7272  *
7273  * @param[in] dev
7274  *   The device to configure through.
7275  * @param[in, out] matcher
7276  *   Flow matcher.
7277  * @param[in, out] key
7278  *   Flow matcher value.
7279  * @param[in] item
7280  *   Flow pattern to translate.
7281  * @note
7282  *   Sample IDs come from the device flex parser (MLX5_FLEX_PARSER_ECPRI_0).
7283  */
7284 static void
7285 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7286                              void *key, const struct rte_flow_item *item)
7287 {
7288         struct mlx5_priv *priv = dev->data->dev_private;
7289         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7290         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7291         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7292                                      misc_parameters_4);
7293         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7294         uint32_t *samples;
7295         void *dw_m;
7296         void *dw_v;
7297
7298         if (!ecpri_v)
7299                 return;
7300         if (!ecpri_m)
7301                 ecpri_m = &rte_flow_item_ecpri_mask;
7302         /*
7303          * At most four DW samples are supported in a single matching now.
7304          * Two are used for eCPRI matching:
7305          * 1. Type: one byte, mask 0x00ff0000 in network order.
7306          * 2. Message ID: one or two bytes, mask 0xffff0000 or 0xff000000,
7307          *    if any.
7308          */
7309         if (!ecpri_m->hdr.common.u32)
7310                 return;
7311         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7312         /* Need to take the whole DW as the mask to fill the entry. */
7313         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7314                             prog_sample_field_value_0);
7315         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7316                             prog_sample_field_value_0);
7317         /* Already big endian (network order) in the header. */
7318         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7319         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7320         /* Sample#0, used for matching type, offset 0. */
7321         MLX5_SET(fte_match_set_misc4, misc4_m,
7322                  prog_sample_field_id_0, samples[0]);
7323         /* The sample ID is not a mask but an identifier; set the real ID. */
7324         MLX5_SET(fte_match_set_misc4, misc4_v,
7325                  prog_sample_field_id_0, samples[0]);
7326         /*
7327          * Checking if message body part needs to be matched.
7328          * Some wildcard rules only matching type field should be supported.
7329          */
7330         if (ecpri_m->hdr.dummy[0]) {
7331                 switch (ecpri_v->hdr.common.type) {
7332                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7333                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7334                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7335                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7336                                             prog_sample_field_value_1);
7337                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7338                                             prog_sample_field_value_1);
7339                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7340                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
7341                         /* Sample#1, to match message body, offset 4. */
7342                         MLX5_SET(fte_match_set_misc4, misc4_m,
7343                                  prog_sample_field_id_1, samples[1]);
7344                         MLX5_SET(fte_match_set_misc4, misc4_v,
7345                                  prog_sample_field_id_1, samples[1]);
7346                         break;
7347                 default:
7348                         /* Others, do not match any sample ID. */
7349                         break;
7350                 }
7351         }
7352 }
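
/*
 * Editor's sketch (application side, not part of the driver): one way to
 * build an eCPRI pattern that reaches the translator above. The helper
 * name and values are hypothetical; error handling is elided. Per the
 * comment in the function, the type takes one byte of the common DW and
 * the IQ-data PC_ID two bytes of the first message DW.
 */
#include <rte_flow.h>
#include <rte_ecpri.h>

static struct rte_flow *
ecpri_iq_data_flow(uint16_t port_id, rte_be16_t pc_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ecpri spec = {
		.hdr = {
			.common = { .type = RTE_ECPRI_MSG_TYPE_IQ_DATA },
			.type0 = { .pc_id = pc_id },
		},
	};
	struct rte_flow_item_ecpri mask = {
		.hdr = {
			.common = { .type = 0xff },
			.type0 = { .pc_id = RTE_BE16(0xffff) },
		},
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_ECPRI,
		  .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}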
7353
7354 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7355
7356 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7357         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7358                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
7359
7360 /**
7361  * Calculate flow matcher enable bitmap.
7362  *
7363  * @param match_criteria
7364  *   Pointer to flow matcher criteria.
7365  *
7366  * @return
7367  *   Bitmap of enabled fields.
7368  */
7369 static uint8_t
7370 flow_dv_matcher_enable(uint32_t *match_criteria)
7371 {
7372         uint8_t match_criteria_enable;
7373
7374         match_criteria_enable =
7375                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7376                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7377         match_criteria_enable |=
7378                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7379                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7380         match_criteria_enable |=
7381                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7382                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7383         match_criteria_enable |=
7384                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7385                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7386         match_criteria_enable |=
7387                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7388                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7389         match_criteria_enable |=
7390                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7391                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7392         return match_criteria_enable;
7393 }
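
/*
 * Editor's sketch: exercising the helper above on a mask buffer that only
 * carries an eCPRI programmable sample, as flow_dv_translate_item_ecpri()
 * produces; only the MISC4 criteria bit is reported as enabled, so the
 * HW skips all other match blocks during lookup.
 */
static void
matcher_enable_example(void)
{
	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
	void *misc4 = MLX5_ADDR_OF(fte_match_param, buf, misc_parameters_4);

	MLX5_SET(fte_match_set_misc4, misc4, prog_sample_field_value_0,
		 0x00ff0000);
	MLX5_ASSERT(flow_dv_matcher_enable(buf) ==
		    1u << MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT);
}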
7394
7396 /**
7397  * Get a flow table.
7398  *
7399  * @param[in, out] dev
7400  *   Pointer to rte_eth_dev structure.
7401  * @param[in] table_id
7402  *   Table id to use.
7403  * @param[in] egress
7404  *   Direction of the table.
7405  * @param[in] transfer
7406  *   E-Switch or NIC flow.
7407  * @param[out] error
7408  *   Pointer to the error structure.
7409  *
7410  * @return
7411  *   The table resource for the given key, NULL in case of failure.
7412  */
7413 static struct mlx5_flow_tbl_resource *
7414 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
7415                          uint32_t table_id, uint8_t egress,
7416                          uint8_t transfer,
7417                          struct rte_flow_error *error)
7418 {
7419         struct mlx5_priv *priv = dev->data->dev_private;
7420         struct mlx5_dev_ctx_shared *sh = priv->sh;
7421         struct mlx5_flow_tbl_resource *tbl;
7422         union mlx5_flow_tbl_key table_key = {
7423                 {
7424                         .table_id = table_id,
7425                         .reserved = 0,
7426                         .domain = !!transfer,
7427                         .direction = !!egress,
7428                 }
7429         };
7430         struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
7431                                                          table_key.v64);
7432         struct mlx5_flow_tbl_data_entry *tbl_data;
7433         uint32_t idx = 0;
7434         int ret;
7435         void *domain;
7436
7437         if (pos) {
7438                 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
7439                                         entry);
7440                 tbl = &tbl_data->tbl;
7441                 rte_atomic32_inc(&tbl->refcnt);
7442                 return tbl;
7443         }
7444         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7445         if (!tbl_data) {
7446                 rte_flow_error_set(error, ENOMEM,
7447                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7448                                    NULL,
7449                                    "cannot allocate flow table data entry");
7450                 return NULL;
7451         }
7452         tbl_data->idx = idx;
7453         tbl = &tbl_data->tbl;
7454         pos = &tbl_data->entry;
7455         if (transfer)
7456                 domain = sh->fdb_domain;
7457         else if (egress)
7458                 domain = sh->tx_domain;
7459         else
7460                 domain = sh->rx_domain;
7461         ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
7462         if (ret) {
7463                 rte_flow_error_set(error, ENOMEM,
7464                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7465                                    NULL, "cannot create flow table object");
7466                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7467                 return NULL;
7468         }
7469         /*
7470          * No multi-thread access yet, but it is still better to initialize
7471          * the reference count before inserting it into the hash list.
7472          */
7473         rte_atomic32_init(&tbl->refcnt);
7474         /* Jump action reference count is initialized here. */
7475         rte_atomic32_init(&tbl_data->jump.refcnt);
7476         pos->key = table_key.v64;
7477         ret = mlx5_hlist_insert(sh->flow_tbls, pos);
7478         if (ret < 0) {
7479                 rte_flow_error_set(error, -ret,
7480                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7481                                    "cannot insert flow table data entry");
7482                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7483                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
                return NULL;
7484         }
7485         rte_atomic32_inc(&tbl->refcnt);
7486         return tbl;
7487 }
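
/*
 * Editor's sketch: the 64-bit hash key used above packs the whole table
 * identity into one word, so repeated gets of the same (id, domain,
 * direction) triple hit the cached entry and only bump its refcount.
 */
static uint64_t
tbl_key_example(uint32_t table_id, int transfer, int egress)
{
	union mlx5_flow_tbl_key key = {
		{
			.table_id = table_id,
			.reserved = 0,
			.domain = !!transfer,   /* 1 - FDB, 0 - NIC. */
			.direction = !!egress,  /* 1 - Tx, 0 - Rx. */
		}
	};

	return key.v64;
}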
7488
7489 /**
7490  * Release a flow table.
7491  *
7492  * @param[in] dev
7493  *   Pointer to rte_eth_dev structure.
7494  * @param[in] tbl
7495  *   Table resource to be released.
7496  *
7497  * @return
7498  *   Returns 0 if the table was released, 1 otherwise.
7499  */
7500 static int
7501 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
7502                              struct mlx5_flow_tbl_resource *tbl)
7503 {
7504         struct mlx5_priv *priv = dev->data->dev_private;
7505         struct mlx5_dev_ctx_shared *sh = priv->sh;
7506         struct mlx5_flow_tbl_data_entry *tbl_data;
7507
7508         if (!tbl)
7509                 return 0;
7510         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
7511         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
7512                 struct mlx5_hlist_entry *pos = &tbl_data->entry;
7513
7514                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7515                 tbl->obj = NULL;
7516                 /* Remove the entry from the hash list and free memory. */
7517                 mlx5_hlist_remove(sh->flow_tbls, pos);
7518                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
7519                                 tbl_data->idx);
7520                 return 0;
7521         }
7522         return 1;
7523 }
7524
7525 /**
7526  * Register the flow matcher.
7527  *
7528  * @param[in, out] dev
7529  *   Pointer to rte_eth_dev structure.
7530  * @param[in, out] matcher
7531  *   Pointer to flow matcher.
7532  * @param[in, out] key
7533  *   Pointer to flow table key.
7534  * @param[in, out] dev_flow
7535  *   Pointer to the dev_flow.
7536  * @param[out] error
7537  *   Pointer to the error structure.
7538  *
7539  * @return
7540  *   0 on success, otherwise -errno (and errno is set).
7541  */
7542 static int
7543 flow_dv_matcher_register(struct rte_eth_dev *dev,
7544                          struct mlx5_flow_dv_matcher *matcher,
7545                          union mlx5_flow_tbl_key *key,
7546                          struct mlx5_flow *dev_flow,
7547                          struct rte_flow_error *error)
7548 {
7549         struct mlx5_priv *priv = dev->data->dev_private;
7550         struct mlx5_dev_ctx_shared *sh = priv->sh;
7551         struct mlx5_flow_dv_matcher *cache_matcher;
7552         struct mlx5dv_flow_matcher_attr dv_attr = {
7553                 .type = IBV_FLOW_ATTR_NORMAL,
7554                 .match_mask = (void *)&matcher->mask,
7555         };
7556         struct mlx5_flow_tbl_resource *tbl;
7557         struct mlx5_flow_tbl_data_entry *tbl_data;
7558         int ret;
7559
7560         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
7561                                        key->domain, error);
7562         if (!tbl)
7563                 return -rte_errno;      /* No need to refill the error info */
7564         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
7565         /* Lookup from cache. */
7566         LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
7567                 if (matcher->crc == cache_matcher->crc &&
7568                     matcher->priority == cache_matcher->priority &&
7569                     !memcmp((const void *)matcher->mask.buf,
7570                             (const void *)cache_matcher->mask.buf,
7571                             cache_matcher->mask.size)) {
7572                         DRV_LOG(DEBUG,
7573                                 "%s group %u priority %hd use %s "
7574                                 "matcher %p: refcnt %d++",
7575                                 key->domain ? "FDB" : "NIC", key->table_id,
7576                                 cache_matcher->priority,
7577                                 key->direction ? "tx" : "rx",
7578                                 (void *)cache_matcher,
7579                                 rte_atomic32_read(&cache_matcher->refcnt));
7580                         rte_atomic32_inc(&cache_matcher->refcnt);
7581                         dev_flow->handle->dvh.matcher = cache_matcher;
7582                         /* old matcher should not make the table ref++. */
7583                         flow_dv_tbl_resource_release(dev, tbl);
7584                         return 0;
7585                 }
7586         }
7587         /* Register new matcher. */
7588         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
7589         if (!cache_matcher) {
7590                 flow_dv_tbl_resource_release(dev, tbl);
7591                 return rte_flow_error_set(error, ENOMEM,
7592                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7593                                           "cannot allocate matcher memory");
7594         }
7595         *cache_matcher = *matcher;
7596         dv_attr.match_criteria_enable =
7597                 flow_dv_matcher_enable(cache_matcher->mask.buf);
7598         dv_attr.priority = matcher->priority;
7599         if (key->direction)
7600                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
7601         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
7602                                                &cache_matcher->matcher_object);
7603         if (ret) {
7604                 rte_free(cache_matcher);
7605 #ifdef HAVE_MLX5DV_DR
7606                 flow_dv_tbl_resource_release(dev, tbl);
7607 #endif
7608                 return rte_flow_error_set(error, ENOMEM,
7609                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7610                                           NULL, "cannot create matcher");
7611         }
7612         /* Save the table information */
7613         cache_matcher->tbl = tbl;
7614         rte_atomic32_init(&cache_matcher->refcnt);
7615         /* only matcher ref++, table ref++ already done above in get API. */
7616         rte_atomic32_inc(&cache_matcher->refcnt);
7617         LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
7618         dev_flow->handle->dvh.matcher = cache_matcher;
7619         DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
7620                 key->domain ? "FDB" : "NIC", key->table_id,
7621                 cache_matcher->priority,
7622                 key->direction ? "tx" : "rx", (void *)cache_matcher,
7623                 rte_atomic32_read(&cache_matcher->refcnt));
7624         return 0;
7625 }
7626
7627 /**
7628  * Find existing tag resource or create and register a new one.
7629  *
7630  * @param[in, out] dev
7631  *   Pointer to rte_eth_dev structure.
7632  * @param[in, out] tag_be24
7633  *   Tag value in big endian, right-shifted by 8 bits (24-bit value).
7634  * @param[in, out] dev_flow
7635  *   Pointer to the dev_flow.
7636  * @param[out] error
7637  *   Pointer to the error structure.
7638  *
7639  * @return
7640  *   0 on success, otherwise -errno (and errno is set).
7641  */
7642 static int
7643 flow_dv_tag_resource_register
7644                         (struct rte_eth_dev *dev,
7645                          uint32_t tag_be24,
7646                          struct mlx5_flow *dev_flow,
7647                          struct rte_flow_error *error)
7648 {
7649         struct mlx5_priv *priv = dev->data->dev_private;
7650         struct mlx5_dev_ctx_shared *sh = priv->sh;
7651         struct mlx5_flow_dv_tag_resource *cache_resource;
7652         struct mlx5_hlist_entry *entry;
7653         int ret;
7654
7655         /* Lookup a matching resource from cache. */
7656         entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
7657         if (entry) {
7658                 cache_resource = container_of
7659                         (entry, struct mlx5_flow_dv_tag_resource, entry);
7660                 rte_atomic32_inc(&cache_resource->refcnt);
7661                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
7662                 dev_flow->dv.tag_resource = cache_resource;
7663                 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
7664                         (void *)cache_resource,
7665                         rte_atomic32_read(&cache_resource->refcnt));
7666                 return 0;
7667         }
7668         /* Register new resource. */
7669         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
7670                                        &dev_flow->handle->dvh.rix_tag);
7671         if (!cache_resource)
7672                 return rte_flow_error_set(error, ENOMEM,
7673                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7674                                           "cannot allocate resource memory");
7675         cache_resource->entry.key = (uint64_t)tag_be24;
7676         ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
7677                                                   &cache_resource->action);
7678         if (ret) {
7679                 /* Allocated from an ipool, must be freed back to it. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
                                dev_flow->handle->dvh.rix_tag);
7680                 return rte_flow_error_set(error, ENOMEM,
7681                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7682                                           NULL, "cannot create action");
7683         }
7684         rte_atomic32_init(&cache_resource->refcnt);
7685         rte_atomic32_inc(&cache_resource->refcnt);
7686         if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
7687                 mlx5_flow_os_destroy_flow_action(cache_resource->action);
7688                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
                                dev_flow->handle->dvh.rix_tag);
7689                 return rte_flow_error_set(error, EEXIST,
7690                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7691                                           NULL, "cannot insert tag");
7692         }
7693         dev_flow->dv.tag_resource = cache_resource;
7694         DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
7695                 (void *)cache_resource,
7696                 rte_atomic32_read(&cache_resource->refcnt));
7697         return 0;
7698 }
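
/*
 * Editor's sketch of the tag_be24 derivation documented above: the
 * 32-bit tag is converted to network order and the last byte dropped,
 * leaving the 24-bit value used both as the hash-list key and as the
 * tag action value (FLAG/MARK translation feeds the output of
 * mlx5_flow_mark_set() into this).
 */
static uint32_t
tag_to_be24_example(uint32_t tag)
{
	return rte_cpu_to_be_32(tag) >> 8;
}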
7699
7700 /**
7701  * Release the tag.
7702  *
7703  * @param dev
7704  *   Pointer to Ethernet device.
7705  * @param tag_idx
7706  *   Tag index.
7707  *
7708  * @return
7709  *   1 while a reference on it exists, 0 when freed.
7710  */
7711 static int
7712 flow_dv_tag_release(struct rte_eth_dev *dev,
7713                     uint32_t tag_idx)
7714 {
7715         struct mlx5_priv *priv = dev->data->dev_private;
7716         struct mlx5_dev_ctx_shared *sh = priv->sh;
7717         struct mlx5_flow_dv_tag_resource *tag;
7718
7719         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
7720         if (!tag)
7721                 return 0;
7722         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
7723                 dev->data->port_id, (void *)tag,
7724                 rte_atomic32_read(&tag->refcnt));
7725         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
7726                 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
7727                 mlx5_hlist_remove(sh->tag_table, &tag->entry);
7728                 DRV_LOG(DEBUG, "port %u tag %p: removed",
7729                         dev->data->port_id, (void *)tag);
7730                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
7731                 return 0;
7732         }
7733         return 1;
7734 }
7735
7736 /**
7737  * Translate port ID action to vport.
7738  *
7739  * @param[in] dev
7740  *   Pointer to rte_eth_dev structure.
7741  * @param[in] action
7742  *   Pointer to the port ID action.
7743  * @param[out] dst_port_id
7744  *   The target port ID.
7745  * @param[out] error
7746  *   Pointer to the error structure.
7747  *
7748  * @return
7749  *   0 on success, a negative errno value otherwise and rte_errno is set.
7750  */
7751 static int
7752 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
7753                                  const struct rte_flow_action *action,
7754                                  uint32_t *dst_port_id,
7755                                  struct rte_flow_error *error)
7756 {
7757         uint32_t port;
7758         struct mlx5_priv *priv;
7759         const struct rte_flow_action_port_id *conf =
7760                         (const struct rte_flow_action_port_id *)action->conf;
7761
7762         port = conf->original ? dev->data->port_id : conf->id;
7763         priv = mlx5_port_to_eswitch_info(port, false);
7764         if (!priv)
7765                 return rte_flow_error_set(error, -rte_errno,
7766                                           RTE_FLOW_ERROR_TYPE_ACTION,
7767                                           NULL,
7768                                           "No eswitch info was found for port");
7769 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
7770         /*
7771          * This parameter is transferred to
7772          * mlx5dv_dr_action_create_dest_ib_port().
7773          */
7774         *dst_port_id = priv->dev_port;
7775 #else
7776         /*
7777          * Legacy mode; no LAG configuration is supported.
7778          * This parameter is transferred to
7779          * mlx5dv_dr_action_create_dest_vport().
7780          */
7781         *dst_port_id = priv->vport_id;
7782 #endif
7783         return 0;
7784 }
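
/*
 * Editor's sketch (application side): the PORT_ID action this helper
 * translates. With .original = 0 the packet is directed to the e-switch
 * port owning ethdev port .id; the helper resolves that to the vport or
 * IB port number expected by rdma-core.
 */
static const struct rte_flow_action_port_id port_id_conf = {
	.original = 0,
	.id = 1, /* Destination ethdev port, hypothetical value. */
};
static const struct rte_flow_action port_id_action = {
	.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
	.conf = &port_id_conf,
};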
7785
7786 /**
7787  * Create a counter with aging configuration.
7788  *
7789  * @param[in] dev
7790  *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the sub flow.
7791  * @param[in] count
7792  *   Pointer to the counter action configuration.
7793  * @param[in] age
7794  *   Pointer to the aging action configuration.
7795  *
7796  * @return
7797  *   Index to flow counter on success, 0 otherwise.
7798  */
7799 static uint32_t
7800 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
7801                                 struct mlx5_flow *dev_flow,
7802                                 const struct rte_flow_action_count *count,
7803                                 const struct rte_flow_action_age *age)
7804 {
7805         uint32_t counter;
7806         struct mlx5_age_param *age_param;
7807
7808         counter = flow_dv_counter_alloc(dev,
7809                                 count ? count->shared : 0,
7810                                 count ? count->id : 0,
7811                                 dev_flow->dv.group, !!age);
7812         if (!counter || age == NULL)
7813                 return counter;
7814         age_param = flow_dv_counter_idx_get_age(dev, counter);
7819         age_param->context = age->context ? age->context :
7820                 (void *)(uintptr_t)(dev_flow->flow_idx);
7821         /*
7822          * The counter age accuracy may have a bit of delay. Apply a 3/4
7823          * second bias on the timeout in order to let it age in time.
7824          */
7825         age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
7826         age_param->port_id = dev->data->port_id;
7827         /* Set the expire time in units of 0.1 sec. */
7828         age_param->expire = age_param->timeout +
7829                         rte_rdtsc() / (rte_get_tsc_hz() / 10);
7830         rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
7831         return counter;
7832 }
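
/*
 * Editor's sketch (application side): an AGE action as consumed above.
 * The timeout is given in seconds; the translator stores it in 0.1 sec
 * units minus the 3/4 second bias, and computes the expire timestamp
 * from the TSC at creation time.
 */
static const struct rte_flow_action_age age_conf = {
	.timeout = 30,   /* Age out after ~30 seconds without traffic. */
	.context = NULL, /* NULL: the flow index is reported instead. */
};
static const struct rte_flow_action age_action = {
	.type = RTE_FLOW_ACTION_TYPE_AGE,
	.conf = &age_conf,
};
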
7833 /**
7834  * Add Tx queue matcher
7835  *
7836  * @param[in] dev
7837  *   Pointer to the dev struct.
7838  * @param[in, out] matcher
7839  *   Flow matcher.
7840  * @param[in, out] key
7841  *   Flow matcher value.
7842  * @param[in] item
7843  *   Flow pattern to translate.
7846  */
7847 static void
7848 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
7849                                 void *matcher, void *key,
7850                                 const struct rte_flow_item *item)
7851 {
7852         const struct mlx5_rte_flow_item_tx_queue *queue_m;
7853         const struct mlx5_rte_flow_item_tx_queue *queue_v;
7854         void *misc_m =
7855                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7856         void *misc_v =
7857                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7858         struct mlx5_txq_ctrl *txq;
7859         uint32_t queue;
7860
7862         queue_m = (const void *)item->mask;
7863         if (!queue_m)
7864                 return;
7865         queue_v = (const void *)item->spec;
7866         if (!queue_v)
7867                 return;
7868         txq = mlx5_txq_get(dev, queue_v->queue);
7869         if (!txq)
7870                 return;
7871         queue = txq->obj->sq->id;
7872         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
7873         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
7874                  queue & queue_m->queue);
7875         mlx5_txq_release(dev, queue_v->queue);
7876 }
7877
7878 /**
7879  * Set the hash fields according to the @p flow information.
7880  *
7881  * @param[in] dev_flow
7882  *   Pointer to the mlx5_flow.
7883  * @param[in] rss_desc
7884  *   Pointer to the mlx5_flow_rss_desc.
7885  */
7886 static void
7887 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
7888                        struct mlx5_flow_rss_desc *rss_desc)
7889 {
7890         uint64_t items = dev_flow->handle->layers;
7891         int rss_inner = 0;
7892         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
7893
7894         dev_flow->hash_fields = 0;
7895 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
7896         if (rss_desc->level >= 2) {
7897                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
7898                 rss_inner = 1;
7899         }
7900 #endif
7901         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
7902             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
7903                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
7904                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
7905                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
7906                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
7907                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
7908                         else
7909                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
7910                 }
7911         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
7912                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
7913                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
7914                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
7915                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
7916                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
7917                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
7918                         else
7919                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
7920                 }
7921         }
7922         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
7923             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
7924                 if (rss_types & ETH_RSS_UDP) {
7925                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
7926                                 dev_flow->hash_fields |=
7927                                                 IBV_RX_HASH_SRC_PORT_UDP;
7928                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
7929                                 dev_flow->hash_fields |=
7930                                                 IBV_RX_HASH_DST_PORT_UDP;
7931                         else
7932                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
7933                 }
7934         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
7935                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
7936                 if (rss_types & ETH_RSS_TCP) {
7937                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
7938                                 dev_flow->hash_fields |=
7939                                                 IBV_RX_HASH_SRC_PORT_TCP;
7940                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
7941                                 dev_flow->hash_fields |=
7942                                                 IBV_RX_HASH_DST_PORT_TCP;
7943                         else
7944                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
7945                 }
7946         }
7947 }
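
/*
 * Editor's sketch (application side): an RSS action that the logic above
 * narrows to source-only IPv4 hashing (IBV_RX_HASH_SRC_IPV4), given an
 * outer IPv4 layer in the pattern; .level >= 2 would switch the whole
 * selection to the inner headers instead.
 */
static uint16_t rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss rss_conf = {
	.level = 1, /* Hash on outer headers. */
	.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY,
	.queue_num = RTE_DIM(rss_queues),
	.queue = rss_queues,
};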
7948
7949 /**
7950  * Fill the flow with DV spec. This function performs no locking;
7951  * the required mutex must be acquired by the caller.
7952  *
7953  * @param[in] dev
7954  *   Pointer to rte_eth_dev structure.
7955  * @param[in, out] dev_flow
7956  *   Pointer to the sub flow.
7957  * @param[in] attr
7958  *   Pointer to the flow attributes.
7959  * @param[in] items
7960  *   Pointer to the list of items.
7961  * @param[in] actions
7962  *   Pointer to the list of actions.
7963  * @param[out] error
7964  *   Pointer to the error structure.
7965  *
7966  * @return
7967  *   0 on success, a negative errno value otherwise and rte_errno is set.
7968  */
7969 static int
7970 __flow_dv_translate(struct rte_eth_dev *dev,
7971                     struct mlx5_flow *dev_flow,
7972                     const struct rte_flow_attr *attr,
7973                     const struct rte_flow_item items[],
7974                     const struct rte_flow_action actions[],
7975                     struct rte_flow_error *error)
7976 {
7977         struct mlx5_priv *priv = dev->data->dev_private;
7978         struct mlx5_dev_config *dev_conf = &priv->config;
7979         struct rte_flow *flow = dev_flow->flow;
7980         struct mlx5_flow_handle *handle = dev_flow->handle;
7981         struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
7982                                               priv->rss_desc)
7983                                               [!!priv->flow_nested_idx];
7984         uint64_t item_flags = 0;
7985         uint64_t last_item = 0;
7986         uint64_t action_flags = 0;
7987         uint64_t priority = attr->priority;
7988         struct mlx5_flow_dv_matcher matcher = {
7989                 .mask = {
7990                         .size = sizeof(matcher.mask.buf) -
7991                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
7992                 },
7993         };
7994         int actions_n = 0;
7995         bool actions_end = false;
7996         union {
7997                 struct mlx5_flow_dv_modify_hdr_resource res;
7998                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
7999                             sizeof(struct mlx5_modification_cmd) *
8000                             (MLX5_MAX_MODIFY_NUM + 1)];
8001         } mhdr_dummy;
8002         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
8003         const struct rte_flow_action_count *count = NULL;
8004         const struct rte_flow_action_age *age = NULL;
8005         union flow_dv_attr flow_attr = { .attr = 0 };
8006         uint32_t tag_be;
8007         union mlx5_flow_tbl_key tbl_key;
8008         uint32_t modify_action_position = UINT32_MAX;
8009         void *match_mask = matcher.mask.buf;
8010         void *match_value = dev_flow->dv.value.buf;
8011         uint8_t next_protocol = 0xff;
8012         struct rte_vlan_hdr vlan = { 0 };
8013         uint32_t table;
8014         int ret = 0;
8015
8016         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
8017                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
8018         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
8019                                        !!priv->fdb_def_rule, &table, error);
8020         if (ret)
8021                 return ret;
8022         dev_flow->dv.group = table;
8023         if (attr->transfer)
8024                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
8025         if (priority == MLX5_FLOW_PRIO_RSVD)
8026                 priority = dev_conf->flow_prio - 1;
8027         /* The number of actions must be reset to 0 in case of a dirty stack. */
8028         mhdr_res->actions_num = 0;
8029         for (; !actions_end ; actions++) {
8030                 const struct rte_flow_action_queue *queue;
8031                 const struct rte_flow_action_rss *rss;
8032                 const struct rte_flow_action *action = actions;
8033                 const uint8_t *rss_key;
8034                 const struct rte_flow_action_jump *jump_data;
8035                 const struct rte_flow_action_meter *mtr;
8036                 struct mlx5_flow_tbl_resource *tbl;
8037                 uint32_t port_id = 0;
8038                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
8039                 int action_type = actions->type;
8040                 const struct rte_flow_action *found_action = NULL;
8041                 struct mlx5_flow_meter *fm = NULL;
8042
8043                 if (!mlx5_flow_os_action_supported(action_type))
8044                         return rte_flow_error_set(error, ENOTSUP,
8045                                                   RTE_FLOW_ERROR_TYPE_ACTION,
8046                                                   actions,
8047                                                   "action not supported");
8048                 switch (action_type) {
8049                 case RTE_FLOW_ACTION_TYPE_VOID:
8050                         break;
8051                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
8052                         if (flow_dv_translate_action_port_id(dev, action,
8053                                                              &port_id, error))
8054                                 return -rte_errno;
8055                         port_id_resource.port_id = port_id;
8056                         MLX5_ASSERT(!handle->rix_port_id_action);
8057                         if (flow_dv_port_id_action_resource_register
8058                             (dev, &port_id_resource, dev_flow, error))
8059                                 return -rte_errno;
8060                         dev_flow->dv.actions[actions_n++] =
8061                                         dev_flow->dv.port_id_action->action;
8062                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
8063                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
8064                         break;
8065                 case RTE_FLOW_ACTION_TYPE_FLAG:
8066                         action_flags |= MLX5_FLOW_ACTION_FLAG;
8067                         dev_flow->handle->mark = 1;
8068                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
8069                                 struct rte_flow_action_mark mark = {
8070                                         .id = MLX5_FLOW_MARK_DEFAULT,
8071                                 };
8072
8073                                 if (flow_dv_convert_action_mark(dev, &mark,
8074                                                                 mhdr_res,
8075                                                                 error))
8076                                         return -rte_errno;
8077                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
8078                                 break;
8079                         }
8080                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
8081                         /*
8082                          * Only one FLAG or MARK is supported per device flow
8083                          * right now. So the pointer to the tag resource must be
8084                          * zero before the register process.
8085                          */
8086                         MLX5_ASSERT(!handle->dvh.rix_tag);
8087                         if (flow_dv_tag_resource_register(dev, tag_be,
8088                                                           dev_flow, error))
8089                                 return -rte_errno;
8090                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8091                         dev_flow->dv.actions[actions_n++] =
8092                                         dev_flow->dv.tag_resource->action;
8093                         break;
8094                 case RTE_FLOW_ACTION_TYPE_MARK:
8095                         action_flags |= MLX5_FLOW_ACTION_MARK;
8096                         dev_flow->handle->mark = 1;
8097                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
8098                                 const struct rte_flow_action_mark *mark =
8099                                         (const struct rte_flow_action_mark *)
8100                                                 actions->conf;
8101
8102                                 if (flow_dv_convert_action_mark(dev, mark,
8103                                                                 mhdr_res,
8104                                                                 error))
8105                                         return -rte_errno;
8106                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
8107                                 break;
8108                         }
8109                         /* Fall-through */
8110                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
8111                         /* Legacy (non-extensive) MARK action. */
8112                         tag_be = mlx5_flow_mark_set
8113                               (((const struct rte_flow_action_mark *)
8114                                (actions->conf))->id);
8115                         MLX5_ASSERT(!handle->dvh.rix_tag);
8116                         if (flow_dv_tag_resource_register(dev, tag_be,
8117                                                           dev_flow, error))
8118                                 return -rte_errno;
8119                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8120                         dev_flow->dv.actions[actions_n++] =
8121                                         dev_flow->dv.tag_resource->action;
8122                         break;
8123                 case RTE_FLOW_ACTION_TYPE_SET_META:
8124                         if (flow_dv_convert_action_set_meta
8125                                 (dev, mhdr_res, attr,
8126                                  (const struct rte_flow_action_set_meta *)
8127                                   actions->conf, error))
8128                                 return -rte_errno;
8129                         action_flags |= MLX5_FLOW_ACTION_SET_META;
8130                         break;
8131                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
8132                         if (flow_dv_convert_action_set_tag
8133                                 (dev, mhdr_res,
8134                                  (const struct rte_flow_action_set_tag *)
8135                                   actions->conf, error))
8136                                 return -rte_errno;
8137                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8138                         break;
8139                 case RTE_FLOW_ACTION_TYPE_DROP:
8140                         action_flags |= MLX5_FLOW_ACTION_DROP;
8141                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
8142                         break;
8143                 case RTE_FLOW_ACTION_TYPE_QUEUE:
8144                         queue = actions->conf;
8145                         rss_desc->queue_num = 1;
8146                         rss_desc->queue[0] = queue->index;
8147                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
8148                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
8149                         break;
8150                 case RTE_FLOW_ACTION_TYPE_RSS:
8151                         rss = actions->conf;
8152                         memcpy(rss_desc->queue, rss->queue,
8153                                rss->queue_num * sizeof(uint16_t));
8154                         rss_desc->queue_num = rss->queue_num;
8155                         /* NULL RSS key indicates default RSS key. */
8156                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
8157                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
8158                         /*
8159                          * rss->level and rss->types should be set in advance
8160                          * when expanding items for RSS.
8161                          */
8162                         action_flags |= MLX5_FLOW_ACTION_RSS;
8163                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
8164                         break;
8165                 case RTE_FLOW_ACTION_TYPE_AGE:
8166                 case RTE_FLOW_ACTION_TYPE_COUNT:
8167                         if (!dev_conf->devx) {
8168                                 return rte_flow_error_set
8169                                               (error, ENOTSUP,
8170                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8171                                                NULL,
8172                                                "count action not supported");
8173                         }
8174                         /* Save information first, will apply later. */
8175                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
8176                                 count = action->conf;
8177                         else
8178                                 age = action->conf;
8179                         action_flags |= MLX5_FLOW_ACTION_COUNT;
8180                         break;
8181                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
8182                         dev_flow->dv.actions[actions_n++] =
8183                                                 priv->sh->pop_vlan_action;
8184                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
8185                         break;
8186                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
8187                         if (!(action_flags &
8188                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
8189                                 flow_dev_get_vlan_info_from_items(items, &vlan);
8190                         vlan.eth_proto = rte_be_to_cpu_16
8191                              ((((const struct rte_flow_action_of_push_vlan *)
8192                                                    actions->conf)->ethertype));
8193                         found_action = mlx5_flow_find_action
8194                                         (actions + 1,
8195                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
8196                         if (found_action)
8197                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
8198                         found_action = mlx5_flow_find_action
8199                                         (actions + 1,
8200                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
8201                         if (found_action)
8202                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
8203                         if (flow_dv_create_action_push_vlan
8204                                             (dev, attr, &vlan, dev_flow, error))
8205                                 return -rte_errno;
8206                         dev_flow->dv.actions[actions_n++] =
8207                                         dev_flow->dv.push_vlan_res->action;
8208                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
8209                         break;
8210                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
8211                         /* The OF_PUSH_VLAN action already handled this one. */
8212                         MLX5_ASSERT(action_flags &
8213                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
8214                         break;
8215                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
8216                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8217                                 break;
8218                         flow_dev_get_vlan_info_from_items(items, &vlan);
8219                         mlx5_update_vlan_vid_pcp(actions, &vlan);
8220                         /* If no VLAN push - this is a modify header action */
8221                         if (flow_dv_convert_action_modify_vlan_vid
8222                                                 (mhdr_res, actions, error))
8223                                 return -rte_errno;
8224                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
8225                         break;
8226                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
8227                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
8228                         if (flow_dv_create_action_l2_encap(dev, actions,
8229                                                            dev_flow,
8230                                                            attr->transfer,
8231                                                            error))
8232                                 return -rte_errno;
8233                         dev_flow->dv.actions[actions_n++] =
8234                                         dev_flow->dv.encap_decap->action;
8235                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
8236                         break;
8237                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
8238                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
8239                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
8240                                                            attr->transfer,
8241                                                            error))
8242                                 return -rte_errno;
8243                         dev_flow->dv.actions[actions_n++] =
8244                                         dev_flow->dv.encap_decap->action;
8245                         action_flags |= MLX5_FLOW_ACTION_DECAP;
8246                         break;
8247                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
8248                         /* Handle encap with preceding decap. */
8249                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
8250                                 if (flow_dv_create_action_raw_encap
8251                                         (dev, actions, dev_flow, attr, error))
8252                                         return -rte_errno;
8253                                 dev_flow->dv.actions[actions_n++] =
8254                                         dev_flow->dv.encap_decap->action;
8255                         } else {
8256                                 /* Handle encap without preceding decap. */
8257                                 if (flow_dv_create_action_l2_encap
8258                                     (dev, actions, dev_flow, attr->transfer,
8259                                      error))
8260                                         return -rte_errno;
8261                                 dev_flow->dv.actions[actions_n++] =
8262                                         dev_flow->dv.encap_decap->action;
8263                         }
8264                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
8265                         break;
8266                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
8267                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
8268                                 ;
8269                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
8270                                 if (flow_dv_create_action_l2_decap
8271                                     (dev, dev_flow, attr->transfer, error))
8272                                         return -rte_errno;
8273                                 dev_flow->dv.actions[actions_n++] =
8274                                         dev_flow->dv.encap_decap->action;
8275                         }
8276                         /* If decap is followed by encap, handle it at encap. */
8277                         action_flags |= MLX5_FLOW_ACTION_DECAP;
8278                         break;
8279                 case RTE_FLOW_ACTION_TYPE_JUMP:
8280                         jump_data = action->conf;
8281                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
8282                                                        jump_data->group,
8283                                                        !!priv->fdb_def_rule,
8284                                                        &table, error);
8285                         if (ret)
8286                                 return ret;
8287                         tbl = flow_dv_tbl_resource_get(dev, table,
8288                                                        attr->egress,
8289                                                        attr->transfer, error);
8290                         if (!tbl)
8291                                 return rte_flow_error_set
8292                                                 (error, errno,
8293                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8294                                                  NULL,
8295                                                  "cannot create jump action.");
8296                         if (flow_dv_jump_tbl_resource_register
8297                             (dev, tbl, dev_flow, error)) {
8298                                 flow_dv_tbl_resource_release(dev, tbl);
8299                                 return rte_flow_error_set
8300                                                 (error, errno,
8301                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8302                                                  NULL,
8303                                                  "cannot create jump action.");
8304                         }
8305                         dev_flow->dv.actions[actions_n++] =
8306                                         dev_flow->dv.jump->action;
8307                         action_flags |= MLX5_FLOW_ACTION_JUMP;
8308                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
8309                         break;
8310                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
8311                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
8312                         if (flow_dv_convert_action_modify_mac
8313                                         (mhdr_res, actions, error))
8314                                 return -rte_errno;
8315                         action_flags |= actions->type ==
8316                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
8317                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
8318                                         MLX5_FLOW_ACTION_SET_MAC_DST;
8319                         break;
8320                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
8321                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
8322                         if (flow_dv_convert_action_modify_ipv4
8323                                         (mhdr_res, actions, error))
8324                                 return -rte_errno;
8325                         action_flags |= actions->type ==
8326                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
8327                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
8328                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
8329                         break;
8330                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
8331                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
8332                         if (flow_dv_convert_action_modify_ipv6
8333                                         (mhdr_res, actions, error))
8334                                 return -rte_errno;
8335                         action_flags |= actions->type ==
8336                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
8337                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
8338                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
8339                         break;
8340                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
8341                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
8342                         if (flow_dv_convert_action_modify_tp
8343                                         (mhdr_res, actions, items,
8344                                          &flow_attr, dev_flow, !!(action_flags &
8345                                          MLX5_FLOW_ACTION_DECAP), error))
8346                                 return -rte_errno;
8347                         action_flags |= actions->type ==
8348                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
8349                                         MLX5_FLOW_ACTION_SET_TP_SRC :
8350                                         MLX5_FLOW_ACTION_SET_TP_DST;
8351                         break;
8352                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
8353                         if (flow_dv_convert_action_modify_dec_ttl
8354                                         (mhdr_res, items, &flow_attr, dev_flow,
8355                                          !!(action_flags &
8356                                          MLX5_FLOW_ACTION_DECAP), error))
8357                                 return -rte_errno;
8358                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
8359                         break;
8360                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
8361                         if (flow_dv_convert_action_modify_ttl
8362                                         (mhdr_res, actions, items, &flow_attr,
8363                                          dev_flow, !!(action_flags &
8364                                          MLX5_FLOW_ACTION_DECAP), error))
8365                                 return -rte_errno;
8366                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
8367                         break;
8368                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
8369                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
8370                         if (flow_dv_convert_action_modify_tcp_seq
8371                                         (mhdr_res, actions, error))
8372                                 return -rte_errno;
8373                         action_flags |= actions->type ==
8374                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
8375                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
8376                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
8377                         break;
8378
8379                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
8380                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
8381                         if (flow_dv_convert_action_modify_tcp_ack
8382                                         (mhdr_res, actions, error))
8383                                 return -rte_errno;
8384                         action_flags |= actions->type ==
8385                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
8386                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
8387                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
8388                         break;
8389                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
8390                         if (flow_dv_convert_action_set_reg
8391                                         (mhdr_res, actions, error))
8392                                 return -rte_errno;
8393                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8394                         break;
8395                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
8396                         if (flow_dv_convert_action_copy_mreg
8397                                         (dev, mhdr_res, actions, error))
8398                                 return -rte_errno;
8399                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8400                         break;
8401                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
8402                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
8403                         dev_flow->handle->fate_action =
8404                                         MLX5_FLOW_FATE_DEFAULT_MISS;
8405                         break;
8406                 case RTE_FLOW_ACTION_TYPE_METER:
8407                         mtr = actions->conf;
8408                         if (!flow->meter) {
8409                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
8410                                                             attr, error);
8411                                 if (!fm)
8412                                         return rte_flow_error_set(error,
8413                                                 rte_errno,
8414                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8415                                                 NULL,
8416                                                 "meter not found "
8417                                                 "or invalid parameters");
8418                                 flow->meter = fm->idx;
8419                         }
8420                         /* Set the meter action. */
8421                         if (!fm) {
8422                                 fm = mlx5_ipool_get(priv->sh->ipool
8423                                                 [MLX5_IPOOL_MTR], flow->meter);
8424                                 if (!fm)
8425                                         return rte_flow_error_set(error,
8426                                                 rte_errno,
8427                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8428                                                 NULL,
8429                                                 "meter not found "
8430                                                 "or invalid parameters");
8431                         }
8432                         dev_flow->dv.actions[actions_n++] =
8433                                 fm->mfts->meter_action;
8434                         action_flags |= MLX5_FLOW_ACTION_METER;
8435                         break;
8436                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
8437                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
8438                                                               actions, error))
8439                                 return -rte_errno;
8440                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
8441                         break;
8442                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
8443                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
8444                                                               actions, error))
8445                                 return -rte_errno;
8446                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
8447                         break;
8448                 case RTE_FLOW_ACTION_TYPE_END:
8449                         actions_end = true;
8450                         if (mhdr_res->actions_num) {
8451                                 /* Create the modify header action if needed. */
8452                                 if (flow_dv_modify_hdr_resource_register
8453                                         (dev, mhdr_res, dev_flow, error))
8454                                         return -rte_errno;
8455                                 dev_flow->dv.actions[modify_action_position] =
8456                                         handle->dvh.modify_hdr->action;
8457                         }
8458                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
8459                                 flow->counter =
8460                                         flow_dv_translate_create_counter(dev,
8461                                                 dev_flow, count, age);
8462
8463                                 if (!flow->counter)
8464                                         return rte_flow_error_set
8465                                                 (error, rte_errno,
8466                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8467                                                 NULL,
8468                                                 "cannot create counter"
8469                                                 " object.");
8470                                 dev_flow->dv.actions[actions_n++] =
8471                                           (flow_dv_counter_get_by_idx(dev,
8472                                           flow->counter, NULL))->action;
8473                         }
8474                         break;
8475                 default:
8476                         break;
8477                 }
8478                 if (mhdr_res->actions_num &&
8479                     modify_action_position == UINT32_MAX)
8480                         modify_action_position = actions_n++;
8481         }
8482         dev_flow->dv.actions_n = actions_n;
8483         dev_flow->act_flags = action_flags;
8484         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
8485                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
8486                 int item_type = items->type;
8487
8488                 if (!mlx5_flow_os_item_supported(item_type))
8489                         return rte_flow_error_set(error, ENOTSUP,
8490                                                   RTE_FLOW_ERROR_TYPE_ITEM,
8491                                                   NULL, "item not supported");
8492                 switch (item_type) {
8493                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
8494                         flow_dv_translate_item_port_id(dev, match_mask,
8495                                                        match_value, items);
8496                         last_item = MLX5_FLOW_ITEM_PORT_ID;
8497                         break;
8498                 case RTE_FLOW_ITEM_TYPE_ETH:
8499                         flow_dv_translate_item_eth(match_mask, match_value,
8500                                                    items, tunnel,
8501                                                    dev_flow->dv.group);
8502                         matcher.priority = action_flags &
8503                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
8504                                         !dev_flow->external ?
8505                                         MLX5_PRIORITY_MAP_L3 :
8506                                         MLX5_PRIORITY_MAP_L2;
8507                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
8508                                              MLX5_FLOW_LAYER_OUTER_L2;
8509                         break;
8510                 case RTE_FLOW_ITEM_TYPE_VLAN:
8511                         flow_dv_translate_item_vlan(dev_flow,
8512                                                     match_mask, match_value,
8513                                                     items, tunnel,
8514                                                     dev_flow->dv.group);
8515                         matcher.priority = MLX5_PRIORITY_MAP_L2;
8516                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
8517                                               MLX5_FLOW_LAYER_INNER_VLAN) :
8518                                              (MLX5_FLOW_LAYER_OUTER_L2 |
8519                                               MLX5_FLOW_LAYER_OUTER_VLAN);
8520                         break;
8521                 case RTE_FLOW_ITEM_TYPE_IPV4:
8522                         mlx5_flow_tunnel_ip_check(items, next_protocol,
8523                                                   &item_flags, &tunnel);
8524                         flow_dv_translate_item_ipv4(match_mask, match_value,
8525                                                     items, item_flags, tunnel,
8526                                                     dev_flow->dv.group);
8527                         matcher.priority = MLX5_PRIORITY_MAP_L3;
8528                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
8529                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
8530                         if (items->mask != NULL &&
8531                             ((const struct rte_flow_item_ipv4 *)
8532                              items->mask)->hdr.next_proto_id) {
8533                                 next_protocol =
8534                                         ((const struct rte_flow_item_ipv4 *)
8535                                          (items->spec))->hdr.next_proto_id;
8536                                 next_protocol &=
8537                                         ((const struct rte_flow_item_ipv4 *)
8538                                          (items->mask))->hdr.next_proto_id;
8539                         } else {
8540                                 /* Reset for inner layer. */
8541                                 next_protocol = 0xff;
8542                         }
8543                         break;
8544                 case RTE_FLOW_ITEM_TYPE_IPV6:
8545                         mlx5_flow_tunnel_ip_check(items, next_protocol,
8546                                                   &item_flags, &tunnel);
8547                         flow_dv_translate_item_ipv6(match_mask, match_value,
8548                                                     items, item_flags, tunnel,
8549                                                     dev_flow->dv.group);
8550                         matcher.priority = MLX5_PRIORITY_MAP_L3;
8551                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
8552                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
8553                         if (items->mask != NULL &&
8554                             ((const struct rte_flow_item_ipv6 *)
8555                              items->mask)->hdr.proto) {
8556                                 next_protocol =
8557                                         ((const struct rte_flow_item_ipv6 *)
8558                                          items->spec)->hdr.proto;
8559                                 next_protocol &=
8560                                         ((const struct rte_flow_item_ipv6 *)
8561                                          items->mask)->hdr.proto;
8562                         } else {
8563                                 /* Reset for inner layer. */
8564                                 next_protocol = 0xff;
8565                         }
8566                         break;
8567                 case RTE_FLOW_ITEM_TYPE_TCP:
8568                         flow_dv_translate_item_tcp(match_mask, match_value,
8569                                                    items, tunnel);
8570                         matcher.priority = MLX5_PRIORITY_MAP_L4;
8571                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
8572                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
8573                         break;
8574                 case RTE_FLOW_ITEM_TYPE_UDP:
8575                         flow_dv_translate_item_udp(match_mask, match_value,
8576                                                    items, tunnel);
8577                         matcher.priority = MLX5_PRIORITY_MAP_L4;
8578                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
8579                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
8580                         break;
8581                 case RTE_FLOW_ITEM_TYPE_GRE:
8582                         flow_dv_translate_item_gre(match_mask, match_value,
8583                                                    items, tunnel);
8584                         matcher.priority = rss_desc->level >= 2 ?
8585                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8586                         last_item = MLX5_FLOW_LAYER_GRE;
8587                         break;
8588                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
8589                         flow_dv_translate_item_gre_key(match_mask,
8590                                                        match_value, items);
8591                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
8592                         break;
8593                 case RTE_FLOW_ITEM_TYPE_NVGRE:
8594                         flow_dv_translate_item_nvgre(match_mask, match_value,
8595                                                      items, tunnel);
8596                         matcher.priority = rss_desc->level >= 2 ?
8597                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8598                         last_item = MLX5_FLOW_LAYER_GRE;
8599                         break;
8600                 case RTE_FLOW_ITEM_TYPE_VXLAN:
8601                         flow_dv_translate_item_vxlan(match_mask, match_value,
8602                                                      items, tunnel);
8603                         matcher.priority = rss_desc->level >= 2 ?
8604                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8605                         last_item = MLX5_FLOW_LAYER_VXLAN;
8606                         break;
8607                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
8608                         flow_dv_translate_item_vxlan_gpe(match_mask,
8609                                                          match_value, items,
8610                                                          tunnel);
8611                         matcher.priority = rss_desc->level >= 2 ?
8612                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8613                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
8614                         break;
8615                 case RTE_FLOW_ITEM_TYPE_GENEVE:
8616                         flow_dv_translate_item_geneve(match_mask, match_value,
8617                                                       items, tunnel);
8618                         matcher.priority = rss_desc->level >= 2 ?
8619                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8620                         last_item = MLX5_FLOW_LAYER_GENEVE;
8621                         break;
8622                 case RTE_FLOW_ITEM_TYPE_MPLS:
8623                         flow_dv_translate_item_mpls(match_mask, match_value,
8624                                                     items, last_item, tunnel);
8625                         matcher.priority = rss_desc->level >= 2 ?
8626                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8627                         last_item = MLX5_FLOW_LAYER_MPLS;
8628                         break;
8629                 case RTE_FLOW_ITEM_TYPE_MARK:
8630                         flow_dv_translate_item_mark(dev, match_mask,
8631                                                     match_value, items);
8632                         last_item = MLX5_FLOW_ITEM_MARK;
8633                         break;
8634                 case RTE_FLOW_ITEM_TYPE_META:
8635                         flow_dv_translate_item_meta(dev, match_mask,
8636                                                     match_value, attr, items);
8637                         last_item = MLX5_FLOW_ITEM_METADATA;
8638                         break;
8639                 case RTE_FLOW_ITEM_TYPE_ICMP:
8640                         flow_dv_translate_item_icmp(match_mask, match_value,
8641                                                     items, tunnel);
8642                         last_item = MLX5_FLOW_LAYER_ICMP;
8643                         break;
8644                 case RTE_FLOW_ITEM_TYPE_ICMP6:
8645                         flow_dv_translate_item_icmp6(match_mask, match_value,
8646                                                       items, tunnel);
8647                         last_item = MLX5_FLOW_LAYER_ICMP6;
8648                         break;
8649                 case RTE_FLOW_ITEM_TYPE_TAG:
8650                         flow_dv_translate_item_tag(dev, match_mask,
8651                                                    match_value, items);
8652                         last_item = MLX5_FLOW_ITEM_TAG;
8653                         break;
8654                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
8655                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
8656                                                         match_value, items);
8657                         last_item = MLX5_FLOW_ITEM_TAG;
8658                         break;
8659                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
8660                         flow_dv_translate_item_tx_queue(dev, match_mask,
8661                                                         match_value,
8662                                                         items);
8663                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
8664                         break;
8665                 case RTE_FLOW_ITEM_TYPE_GTP:
8666                         flow_dv_translate_item_gtp(match_mask, match_value,
8667                                                    items, tunnel);
8668                         matcher.priority = rss_desc->level >= 2 ?
8669                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8670                         last_item = MLX5_FLOW_LAYER_GTP;
8671                         break;
8672                 case RTE_FLOW_ITEM_TYPE_ECPRI:
8673                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
8674                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
8675                                 if (ret)
8676                                         return rte_flow_error_set
8677                                                 (error, ret,
8678                                                 RTE_FLOW_ERROR_TYPE_ITEM,
8679                                                 NULL,
8680                                                 "cannot create eCPRI parser");
8681                         }
8682                         /* Adjust the matcher and device flow value sizes. */
8683                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
8684                         dev_flow->dv.value.size =
8685                                         MLX5_ST_SZ_BYTES(fte_match_param);
8686                         flow_dv_translate_item_ecpri(dev, match_mask,
8687                                                      match_value, items);
8688                         /* No other protocol should follow eCPRI layer. */
8689                         last_item = MLX5_FLOW_LAYER_ECPRI;
8690                         break;
8691                 default:
8692                         break;
8693                 }
8694                 item_flags |= last_item;
8695         }
8696         /*
8697          * When E-Switch mode is enabled, we have two cases where we need to
8698          * set the source port manually.
8699          * The first is the case of a NIC steering rule, and the second is
8700          * an E-Switch rule where no port_id item was found. In both cases
8701          * the source port is set according to the current port in use.
8702          */
8703         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
8704             (priv->representor || priv->master)) {
8705                 if (flow_dv_translate_item_port_id(dev, match_mask,
8706                                                    match_value, NULL))
8707                         return -rte_errno;
8708         }
8709 #ifdef RTE_LIBRTE_MLX5_DEBUG
8710         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
8711                                               dev_flow->dv.value.buf));
8712 #endif
8713         /*
8714          * Layers may be already initialized from prefix flow if this dev_flow
8715          * is the suffix flow.
8716          */
8717         handle->layers |= item_flags;
8718         if (action_flags & MLX5_FLOW_ACTION_RSS)
8719                 flow_dv_hashfields_set(dev_flow, rss_desc);
8720         /* Register matcher. */
8721         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
8722                                     matcher.mask.size);
8723         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
8724                                                      matcher.priority);
8725         /* The reserved field does not need to be set to 0 here. */
8726         tbl_key.domain = attr->transfer;
8727         tbl_key.direction = attr->egress;
8728         tbl_key.table_id = dev_flow->dv.group;
8729         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
8730                 return -rte_errno;
8731         return 0;
8732 }
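
/*
 * Illustrative sketch (not part of the driver): how an application could
 * exercise the eCPRI translation above through the public rte_flow API.
 * The port id, queue index and helper name are placeholder assumptions;
 * the item layout follows rte_ecpri.h as pulled in by rte_flow.h.
 */
static __rte_unused struct rte_flow *
example_create_ecpri_flow(uint16_t port_id, uint16_t queue_idx,
			  struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	/* Match eCPRI IQ data messages (message type 0). */
	struct rte_flow_item_ecpri ecpri_spec = {
		.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
	};
	struct rte_flow_item_ecpri ecpri_mask = {
		.hdr.common.type = 0xff,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_ECPRI,
		  .spec = &ecpri_spec, .mask = &ecpri_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = queue_idx };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}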
8733
8734 /**
8735  * Apply the flow to the NIC, lock free
8736  * (mutex should be acquired by the caller).
8737  *
8738  * @param[in] dev
8739  *   Pointer to the Ethernet device structure.
8740  * @param[in, out] flow
8741  *   Pointer to flow structure.
8742  * @param[out] error
8743  *   Pointer to error structure.
8744  *
8745  * @return
8746  *   0 on success, a negative errno value otherwise and rte_errno is set.
8747  */
8748 static int
8749 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
8750                 struct rte_flow_error *error)
8751 {
8752         struct mlx5_flow_dv_workspace *dv;
8753         struct mlx5_flow_handle *dh;
8754         struct mlx5_flow_handle_dv *dv_h;
8755         struct mlx5_flow *dev_flow;
8756         struct mlx5_priv *priv = dev->data->dev_private;
8757         uint32_t handle_idx;
8758         int n;
8759         int err;
8760         int idx;
8761
8762         for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
8763                 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
8764                 dv = &dev_flow->dv;
8765                 dh = dev_flow->handle;
8766                 dv_h = &dh->dvh;
8767                 n = dv->actions_n;
8768                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
8769                         if (dv->transfer) {
8770                                 dv->actions[n++] = priv->sh->esw_drop_action;
8771                         } else {
8772                                 struct mlx5_hrxq *drop_hrxq;
8773                                 drop_hrxq = mlx5_hrxq_drop_new(dev);
8774                                 if (!drop_hrxq) {
8775                                         rte_flow_error_set
8776                                                 (error, errno,
8777                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8778                                                  NULL,
8779                                                  "cannot get drop hash queue");
8780                                         goto error;
8781                                 }
8782                                 /*
8783                                  * Drop queues will be released by the
8784                                  * dedicated mlx5_hrxq_drop_release()
8785                                  * function. Assign the special index to
8786                                  * hrxq to mark that the queue has been allocated.
8787                                  */
8788                                 dh->rix_hrxq = UINT32_MAX;
8789                                 dv->actions[n++] = drop_hrxq->action;
8790                         }
8791                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
8792                         struct mlx5_hrxq *hrxq;
8793                         uint32_t hrxq_idx;
8794                         struct mlx5_flow_rss_desc *rss_desc =
8795                                 &((struct mlx5_flow_rss_desc *)priv->rss_desc)
8796                                 [!!priv->flow_nested_idx];
8797
8798                         MLX5_ASSERT(rss_desc->queue_num);
8799                         hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
8800                                                  MLX5_RSS_HASH_KEY_LEN,
8801                                                  dev_flow->hash_fields,
8802                                                  rss_desc->queue,
8803                                                  rss_desc->queue_num);
8804                         if (!hrxq_idx) {
8805                                 hrxq_idx = mlx5_hrxq_new
8806                                                 (dev, rss_desc->key,
8807                                                 MLX5_RSS_HASH_KEY_LEN,
8808                                                 dev_flow->hash_fields,
8809                                                 rss_desc->queue,
8810                                                 rss_desc->queue_num,
8811                                                 !!(dh->layers &
8812                                                 MLX5_FLOW_LAYER_TUNNEL));
8813                         }
8814                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8815                                               hrxq_idx);
8816                         if (!hrxq) {
8817                                 rte_flow_error_set
8818                                         (error, rte_errno,
8819                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8820                                          "cannot get hash queue");
8821                                 goto error;
8822                         }
8823                         dh->rix_hrxq = hrxq_idx;
8824                         dv->actions[n++] = hrxq->action;
8825                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
8826                         if (flow_dv_default_miss_resource_register
8827                                         (dev, error)) {
8828                                 rte_flow_error_set
8829                                         (error, rte_errno,
8830                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8831                                          "cannot create default miss resource");
8832                                 goto error_default_miss;
8833                         }
8834                         dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS;
8835                         dv->actions[n++] = priv->sh->default_miss.action;
8836                 }
8837                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
8838                                                (void *)&dv->value, n,
8839                                                dv->actions, &dh->drv_flow);
8840                 if (err) {
8841                         rte_flow_error_set(error, errno,
8842                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8843                                            NULL,
8844                                            "hardware refuses to create flow");
8845                         goto error;
8846                 }
8847                 if (priv->vmwa_context &&
8848                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
8849                         /*
8850                          * The rule contains the VLAN pattern.
8851                          * For a VF we are going to create a VLAN
8852                          * interface to make the hypervisor set the
8853                          * correct E-Switch vport context.
8854                          */
8855                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
8856                 }
8857         }
8858         return 0;
8859 error:
8860         if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
8861                 flow_dv_default_miss_resource_release(dev);
8862 error_default_miss:
8863         err = rte_errno; /* Save rte_errno before cleanup. */
8864         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
8865                        handle_idx, dh, next) {
8866                 /* hrxq is a union, don't clear it if the flag is not set. */
8867                 if (dh->rix_hrxq) {
8868                         if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
8869                                 mlx5_hrxq_drop_release(dev);
8870                                 dh->rix_hrxq = 0;
8871                         } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
8872                                 mlx5_hrxq_release(dev, dh->rix_hrxq);
8873                                 dh->rix_hrxq = 0;
8874                         }
8875                 }
8876                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
8877                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
8878         }
8879         rte_errno = err; /* Restore rte_errno. */
8880         return -rte_errno;
8881 }
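
/*
 * The error path above follows a common DPDK idiom: cleanup helpers may
 * overwrite rte_errno, so the original value is saved first and restored
 * once the partially created resources are released. Minimal sketch
 * (hypothetical helper name):
 */
static __rte_unused int
example_fail_with_cleanup(void)
{
	int err = rte_errno; /* Save rte_errno before cleanup. */

	/* ... release partially created resources here ... */
	rte_errno = err; /* Restore rte_errno after cleanup. */
	return -rte_errno;
}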
8882
8883 /**
8884  * Release the flow matcher.
8885  *
8886  * @param dev
8887  *   Pointer to Ethernet device.
8888  * @param handle
8889  *   Pointer to mlx5_flow_handle.
8890  *
8891  * @return
8892  *   1 while a reference on it exists, 0 when freed.
8893  */
8894 static int
8895 flow_dv_matcher_release(struct rte_eth_dev *dev,
8896                         struct mlx5_flow_handle *handle)
8897 {
8898         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
8899
8900         MLX5_ASSERT(matcher->matcher_object);
8901         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
8902                 dev->data->port_id, (void *)matcher,
8903                 rte_atomic32_read(&matcher->refcnt));
8904         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
8905                 claim_zero(mlx5_flow_os_destroy_flow_matcher
8906                            (matcher->matcher_object));
8907                 LIST_REMOVE(matcher, next);
8908                 /* The table reference is decremented in the release interface. */
8909                 flow_dv_tbl_resource_release(dev, matcher->tbl);
8910                 rte_free(matcher);
8911                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
8912                         dev->data->port_id, (void *)matcher);
8913                 return 0;
8914         }
8915         return 1;
8916 }
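
/*
 * All the *_resource_release() helpers in this file share the shape
 * sketched here: atomically drop a reference and destroy the hardware
 * object only when the last reference is gone. Names in this sketch are
 * hypothetical.
 */
struct example_cached_resource {
	rte_atomic32_t refcnt; /* Shared reference counter. */
	void *action;          /* Underlying hardware object. */
};

/* Return 1 while references remain, 0 once the resource is freed. */
static __rte_unused int
example_resource_release(struct example_cached_resource *res)
{
	if (rte_atomic32_dec_and_test(&res->refcnt)) {
		/* Last user: the HW object would be destroyed here. */
		rte_free(res);
		return 0;
	}
	return 1;
}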
8917
8918 /**
8919  * Release an encap/decap resource.
8920  *
8921  * @param dev
8922  *   Pointer to Ethernet device.
8923  * @param handle
8924  *   Pointer to mlx5_flow_handle.
8925  *
8926  * @return
8927  *   1 while a reference on it exists, 0 when freed.
8928  */
8929 static int
8930 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
8931                                      struct mlx5_flow_handle *handle)
8932 {
8933         struct mlx5_priv *priv = dev->data->dev_private;
8934         uint32_t idx = handle->dvh.rix_encap_decap;
8935         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
8936
8937         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
8938                          idx);
8939         if (!cache_resource)
8940                 return 0;
8941         MLX5_ASSERT(cache_resource->action);
8942         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
8943                 (void *)cache_resource,
8944                 rte_atomic32_read(&cache_resource->refcnt));
8945         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8946                 claim_zero(mlx5_flow_os_destroy_flow_action
8947                                                 (cache_resource->action));
8948                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
8949                              &priv->sh->encaps_decaps, idx,
8950                              cache_resource, next);
8951                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
8952                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
8953                         (void *)cache_resource);
8954                 return 0;
8955         }
8956         return 1;
8957 }
8958
8959 /**
8960  * Release a jump-to-table action resource.
8961  *
8962  * @param dev
8963  *   Pointer to Ethernet device.
8964  * @param handle
8965  *   Pointer to mlx5_flow_handle.
8966  *
8967  * @return
8968  *   1 while a reference on it exists, 0 when freed.
8969  */
8970 static int
8971 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
8972                                   struct mlx5_flow_handle *handle)
8973 {
8974         struct mlx5_priv *priv = dev->data->dev_private;
8975         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
8976         struct mlx5_flow_tbl_data_entry *tbl_data;
8977
8978         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
8979                              handle->rix_jump);
8980         if (!tbl_data)
8981                 return 0;
8982         cache_resource = &tbl_data->jump;
8983         MLX5_ASSERT(cache_resource->action);
8984         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
8985                 (void *)cache_resource,
8986                 rte_atomic32_read(&cache_resource->refcnt));
8987         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8988                 claim_zero(mlx5_flow_os_destroy_flow_action
8989                                                 (cache_resource->action));
8990                 /* The jump action memory is freed inside the table release. */
8991                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
8992                 DRV_LOG(DEBUG, "jump table resource %p: removed",
8993                         (void *)cache_resource);
8994                 return 0;
8995         }
8996         return 1;
8997 }
8998
8999 /**
9000  * Release a default miss resource.
9001  *
9002  * @param dev
9003  *   Pointer to Ethernet device.
9004  * @return
9005  *   1 while a reference on it exists, 0 when freed.
9006  */
9007 static int
9008 flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
9009 {
9010         struct mlx5_priv *priv = dev->data->dev_private;
9011         struct mlx5_dev_ctx_shared *sh = priv->sh;
9012         struct mlx5_flow_default_miss_resource *cache_resource =
9013                         &sh->default_miss;
9014
9015         MLX5_ASSERT(cache_resource->action);
9016         DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
9017                         (void *)cache_resource->action,
9018                         rte_atomic32_read(&cache_resource->refcnt));
9019         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9020                 claim_zero(mlx5_glue->destroy_flow_action
9021                                 (cache_resource->action));
9022                 DRV_LOG(DEBUG, "default miss resource %p: removed",
9023                                 (void *)cache_resource->action);
9024                 return 0;
9025         }
9026         return 1;
9027 }
9028
9029 /**
9030  * Release a modify-header resource.
9031  *
9032  * @param handle
9033  *   Pointer to mlx5_flow_handle.
9034  *
9035  * @return
9036  *   1 while a reference on it exists, 0 when freed.
9037  */
9038 static int
9039 flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
9040 {
9041         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
9042                                                         handle->dvh.modify_hdr;
9043
9044         MLX5_ASSERT(cache_resource->action);
9045         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
9046                 (void *)cache_resource,
9047                 rte_atomic32_read(&cache_resource->refcnt));
9048         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9049                 claim_zero(mlx5_flow_os_destroy_flow_action
9050                                                 (cache_resource->action));
9051                 LIST_REMOVE(cache_resource, next);
9052                 rte_free(cache_resource);
9053                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
9054                         (void *)cache_resource);
9055                 return 0;
9056         }
9057         return 1;
9058 }
9059
9060 /**
9061  * Release port ID action resource.
9062  *
9063  * @param dev
9064  *   Pointer to Ethernet device.
9065  * @param handle
9066  *   Pointer to mlx5_flow_handle.
9067  *
9068  * @return
9069  *   1 while a reference on it exists, 0 when freed.
9070  */
9071 static int
9072 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
9073                                         struct mlx5_flow_handle *handle)
9074 {
9075         struct mlx5_priv *priv = dev->data->dev_private;
9076         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
9077         uint32_t idx = handle->rix_port_id_action;
9078
9079         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
9080                                         idx);
9081         if (!cache_resource)
9082                 return 0;
9083         MLX5_ASSERT(cache_resource->action);
9084         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
9085                 (void *)cache_resource,
9086                 rte_atomic32_read(&cache_resource->refcnt));
9087         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9088                 claim_zero(mlx5_flow_os_destroy_flow_action
9089                                                 (cache_resource->action));
9090                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
9091                              &priv->sh->port_id_action_list, idx,
9092                              cache_resource, next);
9093                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx);
9094                 DRV_LOG(DEBUG, "port id action resource %p: removed",
9095                         (void *)cache_resource);
9096                 return 0;
9097         }
9098         return 1;
9099 }
9100
9101 /**
9102  * Release push vlan action resource.
9103  *
9104  * @param dev
9105  *   Pointer to Ethernet device.
9106  * @param handle
9107  *   Pointer to mlx5_flow_handle.
9108  *
9109  * @return
9110  *   1 while a reference on it exists, 0 when freed.
9111  */
9112 static int
9113 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
9114                                           struct mlx5_flow_handle *handle)
9115 {
9116         struct mlx5_priv *priv = dev->data->dev_private;
9117         uint32_t idx = handle->dvh.rix_push_vlan;
9118         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
9119
9120         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
9121                                         idx);
9122         if (!cache_resource)
9123                 return 0;
9124         MLX5_ASSERT(cache_resource->action);
9125         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
9126                 (void *)cache_resource,
9127                 rte_atomic32_read(&cache_resource->refcnt));
9128         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9129                 claim_zero(mlx5_flow_os_destroy_flow_action
9130                                                 (cache_resource->action));
9131                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
9132                              &priv->sh->push_vlan_action_list, idx,
9133                              cache_resource, next);
9134                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
9135                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
9136                         (void *)cache_resource);
9137                 return 0;
9138         }
9139         return 1;
9140 }
9141
9142 /**
9143  * Release the fate resource.
9144  *
9145  * @param dev
9146  *   Pointer to Ethernet device.
9147  * @param handle
9148  *   Pointer to mlx5_flow_handle.
9149  */
9150 static void
9151 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
9152                                struct mlx5_flow_handle *handle)
9153 {
9154         if (!handle->rix_fate)
9155                 return;
9156         switch (handle->fate_action) {
9157         case MLX5_FLOW_FATE_DROP:
9158                 mlx5_hrxq_drop_release(dev);
9159                 break;
9160         case MLX5_FLOW_FATE_QUEUE:
9161                 mlx5_hrxq_release(dev, handle->rix_hrxq);
9162                 break;
9163         case MLX5_FLOW_FATE_JUMP:
9164                 flow_dv_jump_tbl_resource_release(dev, handle);
9165                 break;
9166         case MLX5_FLOW_FATE_PORT_ID:
9167                 flow_dv_port_id_action_resource_release(dev, handle);
9168                 break;
9169         case MLX5_FLOW_FATE_DEFAULT_MISS:
9170                 flow_dv_default_miss_resource_release(dev);
9171                 break;
9172         default:
9173                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
9174                 break;
9175         }
9176         handle->rix_fate = 0;
9177 }
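
/*
 * The fate handling above relies on a tagged-union layout: fate_action
 * tells which rix_* member is currently valid, which is why callers check
 * the fate before touching rix_hrxq. Illustrative shape only (the real
 * struct mlx5_flow_handle carries more state):
 */
struct example_fate_handle {
	uint8_t fate_action; /* Discriminant, one of MLX5_FLOW_FATE_*. */
	union {
		uint32_t rix_hrxq;           /* Valid for the QUEUE fate. */
		uint32_t rix_jump;           /* Valid for the JUMP fate. */
		uint32_t rix_port_id_action; /* Valid for the PORT_ID fate. */
		uint32_t rix_fate;           /* Generic accessor. */
	};
};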
9178
9179 /**
9180  * Remove the flow from the NIC but keep it in memory.
9181  * Lock free (mutex should be acquired by the caller).
9182  *
9183  * @param[in] dev
9184  *   Pointer to Ethernet device.
9185  * @param[in, out] flow
9186  *   Pointer to flow structure.
9187  */
9188 static void
9189 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
9190 {
9191         struct mlx5_flow_handle *dh;
9192         uint32_t handle_idx;
9193         struct mlx5_priv *priv = dev->data->dev_private;
9194
9195         if (!flow)
9196                 return;
9197         handle_idx = flow->dev_handles;
9198         while (handle_idx) {
9199                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9200                                     handle_idx);
9201                 if (!dh)
9202                         return;
9203                 if (dh->drv_flow) {
9204                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
9205                         dh->drv_flow = NULL;
9206                 }
9207                 if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
9208                     dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
9209                     dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
9210                         flow_dv_fate_resource_release(dev, dh);
9211                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
9212                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
9213                 handle_idx = dh->next.next;
9214         }
9215 }
9216
9217 /**
9218  * Remove the flow from the NIC and the memory.
9219  * Lock free (mutex should be acquired by the caller).
9220  *
9221  * @param[in] dev
9222  *   Pointer to the Ethernet device structure.
9223  * @param[in, out] flow
9224  *   Pointer to flow structure.
9225  */
9226 static void
9227 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
9228 {
9229         struct mlx5_flow_handle *dev_handle;
9230         struct mlx5_priv *priv = dev->data->dev_private;
9231
9232         if (!flow)
9233                 return;
9234         __flow_dv_remove(dev, flow);
9235         if (flow->counter) {
9236                 flow_dv_counter_release(dev, flow->counter);
9237                 flow->counter = 0;
9238         }
9239         if (flow->meter) {
9240                 struct mlx5_flow_meter *fm;
9241
9242                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
9243                                     flow->meter);
9244                 if (fm)
9245                         mlx5_flow_meter_detach(fm);
9246                 flow->meter = 0;
9247         }
9248         while (flow->dev_handles) {
9249                 uint32_t tmp_idx = flow->dev_handles;
9250
9251                 dev_handle = mlx5_ipool_get(priv->sh->ipool
9252                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
9253                 if (!dev_handle)
9254                         return;
9255                 flow->dev_handles = dev_handle->next.next;
9256                 if (dev_handle->dvh.matcher)
9257                         flow_dv_matcher_release(dev, dev_handle);
9258                 if (dev_handle->dvh.rix_encap_decap)
9259                         flow_dv_encap_decap_resource_release(dev, dev_handle);
9260                 if (dev_handle->dvh.modify_hdr)
9261                         flow_dv_modify_hdr_resource_release(dev_handle);
9262                 if (dev_handle->dvh.rix_push_vlan)
9263                         flow_dv_push_vlan_action_resource_release(dev,
9264                                                                   dev_handle);
9265                 if (dev_handle->dvh.rix_tag)
9266                         flow_dv_tag_release(dev,
9267                                             dev_handle->dvh.rix_tag);
9268                 flow_dv_fate_resource_release(dev, dev_handle);
9269                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9270                            tmp_idx);
9271         }
9272 }
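
/*
 * Device handles are chained through pool indexes rather than pointers,
 * as the destroy loop above shows. A condensed sketch of the traversal
 * (hypothetical helper name, real pool accessors):
 */
static __rte_unused void
example_walk_flow_handles(struct mlx5_priv *priv, uint32_t head_idx)
{
	struct mlx5_flow_handle *dh;

	while (head_idx) {
		/* Convert the pool index into a pointer at each step. */
		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				    head_idx);
		if (!dh)
			break;
		/* ... operate on the handle here ... */
		head_idx = dh->next.next;
	}
}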
9273
9274 /**
9275  * Query a DV flow rule for its statistics via DevX.
9276  *
9277  * @param[in] dev
9278  *   Pointer to Ethernet device.
9279  * @param[in] flow
9280  *   Pointer to the sub flow.
9281  * @param[out] data
9282  *   Data retrieved by the query.
9283  * @param[out] error
9284  *   Perform verbose error reporting if not NULL.
9285  *
9286  * @return
9287  *   0 on success, a negative errno value otherwise and rte_errno is set.
9288  */
9289 static int
9290 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
9291                     void *data, struct rte_flow_error *error)
9292 {
9293         struct mlx5_priv *priv = dev->data->dev_private;
9294         struct rte_flow_query_count *qc = data;
9295
9296         if (!priv->config.devx)
9297                 return rte_flow_error_set(error, ENOTSUP,
9298                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9299                                           NULL,
9300                                           "counters are not supported");
9301         if (flow->counter) {
9302                 uint64_t pkts, bytes;
9303                 struct mlx5_flow_counter *cnt;
9304
9305                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
9306                                                  NULL);
9307                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
9308                                                &bytes);
9309
9310                 if (err)
9311                         return rte_flow_error_set(error, -err,
9312                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9313                                         NULL, "cannot read counters");
9314                 qc->hits_set = 1;
9315                 qc->bytes_set = 1;
9316                 qc->hits = pkts - cnt->hits;
9317                 qc->bytes = bytes - cnt->bytes;
9318                 if (qc->reset) {
9319                         cnt->hits = pkts;
9320                         cnt->bytes = bytes;
9321                 }
9322                 return 0;
9323         }
9324         return rte_flow_error_set(error, EINVAL,
9325                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9326                                   NULL,
9327                                   "counters are not available");
9328 }
9329
9330 /**
9331  * Query a flow.
9332  *
9333  * @see rte_flow_query()
9334  * @see rte_flow_ops
9335  */
9336 static int
9337 flow_dv_query(struct rte_eth_dev *dev,
9338               struct rte_flow *flow __rte_unused,
9339               const struct rte_flow_action *actions __rte_unused,
9340               void *data __rte_unused,
9341               struct rte_flow_error *error __rte_unused)
9342 {
9343         int ret = -EINVAL;
9344
9345         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
9346                 switch (actions->type) {
9347                 case RTE_FLOW_ACTION_TYPE_VOID:
9348                         break;
9349                 case RTE_FLOW_ACTION_TYPE_COUNT:
9350                         ret = flow_dv_query_count(dev, flow, data, error);
9351                         break;
9352                 default:
9353                         return rte_flow_error_set(error, ENOTSUP,
9354                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9355                                                   actions,
9356                                                   "action not supported");
9357                 }
9358         }
9359         return ret;
9360 }
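
/*
 * Application-side counterpart of flow_dv_query_count() (sketch only; the
 * helper name is hypothetical): query the COUNT action of a flow through
 * the generic rte_flow_query() entry point.
 */
static __rte_unused int
example_query_flow_counter(uint16_t port_id, struct rte_flow *flow,
			   uint64_t *hits, uint64_t *bytes,
			   struct rte_flow_error *err)
{
	struct rte_flow_query_count qc = { .reset = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	int ret = rte_flow_query(port_id, flow, actions, &qc, err);

	if (!ret) {
		*hits = qc.hits_set ? qc.hits : 0;
		*bytes = qc.bytes_set ? qc.bytes : 0;
	}
	return ret;
}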
9361
9362 /**
9363  * Destroy the meter table set.
9364  * Lock free (mutex should be acquired by the caller).
9365  *
9366  * @param[in] dev
9367  *   Pointer to Ethernet device.
9368  * @param[in] tbl
9369  *   Pointer to the meter table set.
9370  *
9371  * @return
9372  *   Always 0.
9373  */
9374 static int
9375 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
9376                         struct mlx5_meter_domains_infos *tbl)
9377 {
9378         struct mlx5_priv *priv = dev->data->dev_private;
9379         struct mlx5_meter_domains_infos *mtd =
9380                                 (struct mlx5_meter_domains_infos *)tbl;
9381
9382         if (!mtd || !priv->config.dv_flow_en)
9383                 return 0;
9384         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
9385                 claim_zero(mlx5_flow_os_destroy_flow
9386                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
9387         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
9388                 claim_zero(mlx5_flow_os_destroy_flow
9389                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
9390         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
9391                 claim_zero(mlx5_flow_os_destroy_flow
9392                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
9393         if (mtd->egress.color_matcher)
9394                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9395                            (mtd->egress.color_matcher));
9396         if (mtd->egress.any_matcher)
9397                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9398                            (mtd->egress.any_matcher));
9399         if (mtd->egress.tbl)
9400                 flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
9401         if (mtd->egress.sfx_tbl)
9402                 flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
9403         if (mtd->ingress.color_matcher)
9404                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9405                            (mtd->ingress.color_matcher));
9406         if (mtd->ingress.any_matcher)
9407                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9408                            (mtd->ingress.any_matcher));
9409         if (mtd->ingress.tbl)
9410                 flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
9411         if (mtd->ingress.sfx_tbl)
9412                 flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
9413         if (mtd->transfer.color_matcher)
9414                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9415                            (mtd->transfer.color_matcher));
9416         if (mtd->transfer.any_matcher)
9417                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9418                            (mtd->transfer.any_matcher));
9419         if (mtd->transfer.tbl)
9420                 flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
9421         if (mtd->transfer.sfx_tbl)
9422                 flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
9423         if (mtd->drop_actn)
9424                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
9425         rte_free(mtd);
9426         return 0;
9427 }
9428
9429 /* Number of meter flow actions: count and jump, or count and drop. */
9430 #define METER_ACTIONS 2
9431
9432 /**
9433  * Create the meter table and suffix table for the specified domain.
9434  *
9435  * @param[in] dev
9436  *   Pointer to Ethernet device.
9437  * @param[in,out] mtb
9438  *   Pointer to DV meter table set.
9439  * @param[in] egress
9440  *   Table attribute.
9441  * @param[in] transfer
9442  *   Table attribute.
9443  * @param[in] color_reg_c_idx
9444  *   Reg C index for color match.
9445  *
9446  * @return
9447  *   0 on success, -1 otherwise and rte_errno is set.
9448  */
9449 static int
9450 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
9451                            struct mlx5_meter_domains_infos *mtb,
9452                            uint8_t egress, uint8_t transfer,
9453                            uint32_t color_reg_c_idx)
9454 {
9455         struct mlx5_priv *priv = dev->data->dev_private;
9456         struct mlx5_dev_ctx_shared *sh = priv->sh;
9457         struct mlx5_flow_dv_match_params mask = {
9458                 .size = sizeof(mask.buf),
9459         };
9460         struct mlx5_flow_dv_match_params value = {
9461                 .size = sizeof(value.buf),
9462         };
9463         struct mlx5dv_flow_matcher_attr dv_attr = {
9464                 .type = IBV_FLOW_ATTR_NORMAL,
9465                 .priority = 0,
9466                 .match_criteria_enable = 0,
9467                 .match_mask = (void *)&mask,
9468         };
9469         void *actions[METER_ACTIONS];
9470         struct mlx5_meter_domain_info *dtb;
9471         struct rte_flow_error error;
9472         int i = 0;
9473         int ret;
9474
9475         if (transfer)
9476                 dtb = &mtb->transfer;
9477         else if (egress)
9478                 dtb = &mtb->egress;
9479         else
9480                 dtb = &mtb->ingress;
9481         /* Create the meter table with METER level. */
9482         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
9483                                             egress, transfer, &error);
9484         if (!dtb->tbl) {
9485                 DRV_LOG(ERR, "Failed to create meter policer table.");
9486                 return -1;
9487         }
9488         /* Create the meter suffix table with SUFFIX level. */
9489         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
9490                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
9491                                             egress, transfer, &error);
9492         if (!dtb->sfx_tbl) {
9493                 DRV_LOG(ERR, "Failed to create meter suffix table.");
9494                 return -1;
9495         }
9496         /* Create matchers, Any and Color. */
9497         dv_attr.priority = 3;
9498         dv_attr.match_criteria_enable = 0;
9499         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
9500                                                &dtb->any_matcher);
9501         if (ret) {
9502                 DRV_LOG(ERR, "Failed to create meter"
9503                              " policer default matcher.");
9504                 goto error_exit;
9505         }
9506         dv_attr.priority = 0;
9507         dv_attr.match_criteria_enable =
9508                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9509         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
9510                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
9511         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
9512                                                &dtb->color_matcher);
9513         if (ret) {
9514                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
9515                 goto error_exit;
9516         }
9517         if (mtb->count_actns[RTE_MTR_DROPPED])
9518                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
9519         actions[i++] = mtb->drop_actn;
9520         /* Default rule: lowest priority, match any, actions: drop. */
9521         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
9522                                        actions,
9523                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
9524         if (ret) {
9525                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
9526                 goto error_exit;
9527         }
9528         return 0;
9529 error_exit:
9530         return -1;
9531 }
9532
9533 /**
9534  * Create the needed meter and suffix tables.
9535  * Lock free, (mutex should be acquired by caller).
9536  * Lock free (mutex should be acquired by the caller).
9537  * @param[in] dev
9538  *   Pointer to Ethernet device.
9539  * @param[in] fm
9540  *   Pointer to the flow meter.
9541  *
9542  * @return
9543  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
9544  */
9545 static struct mlx5_meter_domains_infos *
9546 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
9547                        const struct mlx5_flow_meter *fm)
9548 {
9549         struct mlx5_priv *priv = dev->data->dev_private;
9550         struct mlx5_meter_domains_infos *mtb;
9551         int ret;
9552         int i;
9553
9554         if (!priv->mtr_en) {
9555                 rte_errno = ENOTSUP;
9556                 return NULL;
9557         }
9558         mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
9559         if (!mtb) {
9560                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
9561                 return NULL;
9562         }
9563         /* Create the meter count actions. */
9564         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
9565                 struct mlx5_flow_counter *cnt;
9566                 if (!fm->policer_stats.cnt[i])
9567                         continue;
9568                 cnt = flow_dv_counter_get_by_idx(dev,
9569                       fm->policer_stats.cnt[i], NULL);
9570                 mtb->count_actns[i] = cnt->action;
9571         }
9572         /* Create drop action. */
9573         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
9574         if (ret) {
9575                 DRV_LOG(ERR, "Failed to create drop action.");
9576                 goto error_exit;
9577         }
9578         /* Egress meter table. */
9579         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
9580         if (ret) {
9581                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
9582                 goto error_exit;
9583         }
9584         /* Ingress meter table. */
9585         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
9586         if (ret) {
9587                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
9588                 goto error_exit;
9589         }
9590         /* FDB meter table. */
9591         if (priv->config.dv_esw_en) {
9592                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
9593                                                  priv->mtr_color_reg);
9594                 if (ret) {
9595                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
9596                         goto error_exit;
9597                 }
9598         }
9599         return mtb;
9600 error_exit:
9601         flow_dv_destroy_mtr_tbl(dev, mtb);
9602         return NULL;
9603 }
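
/*
 * Application-side sketch of exercising the meter tables created above
 * (illustrative only; ids, rates and the helper name are placeholder
 * assumptions, and <rte_mtr.h> would be required): add a srTCM profile,
 * instantiate a meter, and let flows reference it through the METER
 * action, e.g. { .type = RTE_FLOW_ACTION_TYPE_METER,
 * .conf = &(struct rte_flow_action_meter){ .mtr_id = 1 } }.
 */
static __rte_unused int
example_setup_meter(uint16_t port_id)
{
	struct rte_mtr_error mtr_err;
	struct rte_mtr_meter_profile profile = {
		.alg = RTE_MTR_SRTCM_RFC2697,
		.srtcm_rfc2697 = {
			.cir = 1000000, /* Committed rate, bytes/second. */
			.cbs = 65536,   /* Committed burst size, bytes. */
			.ebs = 65536,   /* Excess burst size, bytes. */
		},
	};
	struct rte_mtr_params params = {
		.meter_profile_id = 1,
		.meter_enable = 1,
		.action = {
			[RTE_COLOR_GREEN] = MTR_POLICER_ACTION_COLOR_GREEN,
			[RTE_COLOR_YELLOW] = MTR_POLICER_ACTION_COLOR_YELLOW,
			[RTE_COLOR_RED] = MTR_POLICER_ACTION_DROP,
		},
	};

	if (rte_mtr_meter_profile_add(port_id, 1, &profile, &mtr_err))
		return -1;
	return rte_mtr_create(port_id, 1, &params, 0, &mtr_err);
}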
9604
9605 /**
9606  * Destroy domain policer rule.
9607  *
9608  * @param[in] dt
9609  *   Pointer to domain table.
9610  */
9611 static void
9612 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
9613 {
9614         int i;
9615
9616         for (i = 0; i < RTE_MTR_DROPPED; i++) {
9617                 if (dt->policer_rules[i]) {
9618                         claim_zero(mlx5_flow_os_destroy_flow
9619                                    (dt->policer_rules[i]));
9620                         dt->policer_rules[i] = NULL;
9621                 }
9622         }
9623         if (dt->jump_actn) {
9624                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
9625                 dt->jump_actn = NULL;
9626         }
9627 }
9628
9629 /**
9630  * Destroy policer rules.
9631  *
9632  * @param[in] dev
9633  *   Pointer to Ethernet device.
9634  * @param[in] fm
9635  *   Pointer to flow meter structure.
9636  * @param[in] attr
9637  *   Pointer to flow attributes.
9638  *
9639  * @return
9640  *   Always 0.
9641  */
9642 static int
9643 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
9644                               const struct mlx5_flow_meter *fm,
9645                               const struct rte_flow_attr *attr)
9646 {
9647         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
9648
9649         if (!mtb)
9650                 return 0;
9651         if (attr->egress)
9652                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
9653         if (attr->ingress)
9654                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
9655         if (attr->transfer)
9656                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
9657         return 0;
9658 }
9659
/**
 * Create the meter policer rules for a specific domain.
 *
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] dtb
 *   Pointer to the meter domain table.
 * @param[in] mtr_reg_c
 *   Color match REG_C.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
9673 static int
9674 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
9675                                     struct mlx5_meter_domain_info *dtb,
9676                                     uint8_t mtr_reg_c)
9677 {
9678         struct mlx5_flow_dv_match_params matcher = {
9679                 .size = sizeof(matcher.buf),
9680         };
9681         struct mlx5_flow_dv_match_params value = {
9682                 .size = sizeof(value.buf),
9683         };
9684         struct mlx5_meter_domains_infos *mtb = fm->mfts;
9685         void *actions[METER_ACTIONS];
9686         int i;
9687         int ret = 0;
9688
        /* Create the jump action to the suffix table, if not created yet. */
9690         if (!dtb->jump_actn)
9691                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9692                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
9693         if (ret) {
9694                 DRV_LOG(ERR, "Failed to create policer jump action.");
9695                 goto error;
9696         }
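        /*
         * One rule per color: count the packet when a counter is
         * configured, then either drop it or jump to the suffix table.
         */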
9697         for (i = 0; i < RTE_MTR_DROPPED; i++) {
9698                 int j = 0;
9699
9700                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
9701                                        rte_col_2_mlx5_col(i), UINT8_MAX);
9702                 if (mtb->count_actns[i])
9703                         actions[j++] = mtb->count_actns[i];
9704                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
9705                         actions[j++] = mtb->drop_actn;
9706                 else
9707                         actions[j++] = dtb->jump_actn;
9708                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
9709                                                (void *)&value, j, actions,
9710                                                &dtb->policer_rules[i]);
9711                 if (ret) {
9712                         DRV_LOG(ERR, "Failed to create policer rule.");
9713                         goto error;
9714                 }
9715         }
9716         return 0;
9717 error:
9718         rte_errno = errno;
9719         return -1;
9720 }
9721
9722 /**
9723  * Create policer rules.
9724  *
9725  * @param[in] dev
9726  *   Pointer to Ethernet device.
9727  * @param[in] fm
9728  *   Pointer to flow meter structure.
9729  * @param[in] attr
9730  *   Pointer to flow attributes.
9731  *
9732  * @return
9733  *   0 on success, -1 otherwise.
9734  */
9735 static int
9736 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
9737                              struct mlx5_flow_meter *fm,
9738                              const struct rte_flow_attr *attr)
9739 {
9740         struct mlx5_priv *priv = dev->data->dev_private;
9741         struct mlx5_meter_domains_infos *mtb = fm->mfts;
9742         int ret;
9743
9744         if (attr->egress) {
9745                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
9746                                                 priv->mtr_color_reg);
9747                 if (ret) {
9748                         DRV_LOG(ERR, "Failed to create egress policer.");
9749                         goto error;
9750                 }
9751         }
9752         if (attr->ingress) {
9753                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
9754                                                 priv->mtr_color_reg);
9755                 if (ret) {
9756                         DRV_LOG(ERR, "Failed to create ingress policer.");
9757                         goto error;
9758                 }
9759         }
9760         if (attr->transfer) {
9761                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
9762                                                 priv->mtr_color_reg);
9763                 if (ret) {
9764                         DRV_LOG(ERR, "Failed to create transfer policer.");
9765                         goto error;
9766                 }
9767         }
9768         return 0;
9769 error:
9770         flow_dv_destroy_policer_rules(dev, fm, attr);
9771         return -1;
9772 }
9773
9774 /**
9775  * Query a devx counter.
9776  *
9777  * @param[in] dev
9778  *   Pointer to the Ethernet device structure.
9779  * @param[in] cnt
9780  *   Index to the flow counter.
9781  * @param[in] clear
9782  *   Set to clear the counter statistics.
9783  * @param[out] pkts
9784  *   The statistics value of packets.
9785  * @param[out] bytes
9786  *   The statistics value of bytes.
9787  *
9788  * @return
9789  *   0 on success, otherwise return -1.
9790  */
9791 static int
9792 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
9793                       uint64_t *pkts, uint64_t *bytes)
9794 {
9795         struct mlx5_priv *priv = dev->data->dev_private;
9796         struct mlx5_flow_counter *cnt;
9797         uint64_t inn_pkts, inn_bytes;
9798         int ret;
9799
9800         if (!priv->config.devx)
9801                 return -1;
9802
9803         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
9804         if (ret)
9805                 return -1;
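        /*
         * The DevX counter is free-running; cnt->hits and cnt->bytes hold
         * the snapshot taken by the last clearing query, so the values
         * reported to the caller are deltas against that snapshot.
         */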
9806         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
9807         *pkts = inn_pkts - cnt->hits;
9808         *bytes = inn_bytes - cnt->bytes;
9809         if (clear) {
9810                 cnt->hits = inn_pkts;
9811                 cnt->bytes = inn_bytes;
9812         }
9813         return 0;
9814 }
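
/*
 * A minimal usage sketch, illustration only and never compiled in (the
 * MLX5_FLOW_DOC_EXAMPLE guard and the function name are hypothetical):
 * the query path above is normally reached through the generic rte_flow
 * API with a COUNT action attached to the flow.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLE
static int
example_query_count(uint16_t port_id, struct rte_flow *flow,
                    uint64_t *pkts, uint64_t *bytes)
{
        /* Set .reset to 1 to also clear the statistics on read. */
        struct rte_flow_query_count qc = { .reset = 0 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_error error;

        if (rte_flow_query(port_id, flow, &action, &qc, &error))
                return -1;
        *pkts = qc.hits;
        *bytes = qc.bytes;
        return 0;
}
#endif /* MLX5_FLOW_DOC_EXAMPLE */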
9815
/**
 * Get aged-out flows.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flow contexts.
 * @param[in] nb_contexts
 *   The length of the context array.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   The number of contexts retrieved on success, otherwise a negative
 *   errno value. If nb_contexts is 0, the total number of aged contexts
 *   is returned; otherwise, the number of aged flows reported in the
 *   context array is returned.
 */
9836 static int
9837 flow_get_aged_flows(struct rte_eth_dev *dev,
9838                     void **context,
9839                     uint32_t nb_contexts,
9840                     struct rte_flow_error *error)
9841 {
9842         struct mlx5_priv *priv = dev->data->dev_private;
9843         struct mlx5_age_info *age_info;
9844         struct mlx5_age_param *age_param;
9845         struct mlx5_flow_counter *counter;
9846         int nb_flows = 0;
9847
        if (nb_contexts && !context)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "the context array must be valid "
                                          "when nb_contexts is not 0");
9854         age_info = GET_PORT_AGE_INFO(priv);
9855         rte_spinlock_lock(&age_info->aged_sl);
9856         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
9857                 nb_flows++;
9858                 if (nb_contexts) {
9859                         age_param = MLX5_CNT_TO_AGE(counter);
9860                         context[nb_flows - 1] = age_param->context;
9861                         if (!(--nb_contexts))
9862                                 break;
9863                 }
9864         }
9865         rte_spinlock_unlock(&age_info->aged_sl);
9866         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
9867         return nb_flows;
9868 }
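
/*
 * A minimal usage sketch, illustration only and never compiled in (the
 * MLX5_FLOW_DOC_EXAMPLE guard and the function name are hypothetical):
 * applications typically call rte_flow_get_aged_flows() twice, first
 * with nb_contexts == 0 to learn how many flows aged out, then with an
 * array large enough to collect their contexts.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLE
static void
example_drain_aged_flows(uint16_t port_id)
{
        struct rte_flow_error error;
        void **contexts;
        int nb;
        int i;

        /* First call: nb_contexts == 0 returns the total aged count. */
        nb = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
        if (nb <= 0)
                return;
        contexts = rte_calloc(__func__, nb, sizeof(void *), 0);
        if (!contexts)
                return;
        /* Second call fills the contexts set by the AGE actions. */
        nb = rte_flow_get_aged_flows(port_id, contexts, nb, &error);
        for (i = 0; i < nb; i++) {
                /* Application-defined handling of each aged context. */
        }
        rte_free(contexts);
}
#endif /* MLX5_FLOW_DOC_EXAMPLE */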
9869
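/*
 * The DV resources cached by the functions below (tables, matchers,
 * modify-header and encap/decap actions) may be shared between ports of
 * the same device, so their creation and release paths are serialized:
 * each wrapper takes the shared lock around its lock-free implementation.
 */
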
9870 /*
 * Mutex-protected thunk to lock-free __flow_dv_translate().
9872  */
9873 static int
9874 flow_dv_translate(struct rte_eth_dev *dev,
9875                   struct mlx5_flow *dev_flow,
9876                   const struct rte_flow_attr *attr,
9877                   const struct rte_flow_item items[],
9878                   const struct rte_flow_action actions[],
9879                   struct rte_flow_error *error)
9880 {
9881         int ret;
9882
9883         flow_dv_shared_lock(dev);
9884         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
9885         flow_dv_shared_unlock(dev);
9886         return ret;
9887 }
9888
9889 /*
 * Mutex-protected thunk to lock-free __flow_dv_apply().
9891  */
9892 static int
9893 flow_dv_apply(struct rte_eth_dev *dev,
9894               struct rte_flow *flow,
9895               struct rte_flow_error *error)
9896 {
9897         int ret;
9898
9899         flow_dv_shared_lock(dev);
9900         ret = __flow_dv_apply(dev, flow, error);
9901         flow_dv_shared_unlock(dev);
9902         return ret;
9903 }
9904
9905 /*
9906  * Mutex-protected thunk to lock-free __flow_dv_remove().
9907  */
9908 static void
9909 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
9910 {
9911         flow_dv_shared_lock(dev);
9912         __flow_dv_remove(dev, flow);
9913         flow_dv_shared_unlock(dev);
9914 }
9915
9916 /*
9917  * Mutex-protected thunk to lock-free __flow_dv_destroy().
9918  */
9919 static void
9920 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
9921 {
9922         flow_dv_shared_lock(dev);
9923         __flow_dv_destroy(dev, flow);
9924         flow_dv_shared_unlock(dev);
9925 }
9926
9927 /*
9928  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
9929  */
9930 static uint32_t
9931 flow_dv_counter_allocate(struct rte_eth_dev *dev)
9932 {
9933         uint32_t cnt;
9934
9935         flow_dv_shared_lock(dev);
9936         cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0);
9937         flow_dv_shared_unlock(dev);
9938         return cnt;
9939 }
9940
9941 /*
9942  * Mutex-protected thunk to lock-free flow_dv_counter_release().
9943  */
9944 static void
9945 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
9946 {
9947         flow_dv_shared_lock(dev);
9948         flow_dv_counter_release(dev, cnt);
9949         flow_dv_shared_unlock(dev);
9950 }
9951
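/* DV flow engine callbacks, selected by the generic mlx5 flow layer. */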
9952 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
9953         .validate = flow_dv_validate,
9954         .prepare = flow_dv_prepare,
9955         .translate = flow_dv_translate,
9956         .apply = flow_dv_apply,
9957         .remove = flow_dv_remove,
9958         .destroy = flow_dv_destroy,
9959         .query = flow_dv_query,
9960         .create_mtr_tbls = flow_dv_create_mtr_tbl,
9961         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
9962         .create_policer_rules = flow_dv_create_policer_rules,
9963         .destroy_policer_rules = flow_dv_destroy_policer_rules,
9964         .counter_alloc = flow_dv_counter_allocate,
9965         .counter_free = flow_dv_counter_free,
9966         .counter_query = flow_dv_counter_query,
9967         .get_aged_flows = flow_get_aged_flows,
9968 };
9969
9970 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */