net/mlx5: fix count actions query in sample flow
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

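/*
 * Flow attribute flags deduced from the flow items. Declared as a union
 * so that assigning 0 to the "attr" member clears all flags at once when
 * a tunnel boundary is crossed (see flow_dv_attr_init() below).
 */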
union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

/**
 * Initialize the flow attributes structure according to the flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, it means this dev_flow is the
         * suffix flow and the layer flags were set by the prefix flow. Use
         * the layer flags from the prefix flow, as the suffix flow may not
         * have the user-defined items since the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
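
/*
 * Illustration (not part of the original source): for a prefix/suffix
 * split flow whose prefix matched ETH / IPV4 / UDP, the suffix dev_flow
 * enters with layers == MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 * MLX5_FLOW_LAYER_OUTER_L4_UDP, so the item loop is skipped and the
 * attributes become {valid = 1, ipv4 = 1, udp = 1}.
 */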

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
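
/*
 * Worked example (illustrative): for OF_SET_VLAN_PCP with vlan_pcp = 5,
 * vlan_tci = 5 << 13 = 0xA000; the old PCP bits (mask 0xE000) are
 * cleared and OR-ed with the new value, leaving the VID/DEI bits intact.
 */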

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
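
/*
 * Worked example (illustrative): for data = {0x12, 0x34, 0x56} and
 * size = 3, the first two bytes yield 0x1234 and then
 * (0x1234 << 8) | 0x56 = 0x123456 is returned in host-endian format.
 */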

/**
 * Convert a modify-header action to the DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
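
/*
 * Worked example (illustrative): a 4-byte mask of 0x00fff000 gives
 * off_b = rte_bsf32(0x00fff000) = 12 and
 * size_b = 32 - 12 - __builtin_clz(0x00fff000) = 32 - 12 - 8 = 12,
 * i.e. a modification of 12 bits starting at bit offset 12.
 */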

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}
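
/*
 * Note (illustrative): data1 is deliberately not converted with
 * rte_cpu_to_be_32() here; conf->vlan_vid is already big-endian
 * (rte_be16_t), and the 16-bit shift appears intended to place the
 * 12 VID bits where the field description above expects them.
 */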

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
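
/*
 * Worked example (illustrative): the decrement is emulated with
 * MLX5_MODIFICATION_TYPE_ADD of 0xFF to the 8-bit TTL/hop-limit field;
 * the addition wraps modulo 256, so TTL + 0xFF == TTL - 1 for any TTL,
 * e.g. 64 + 255 = 319 mod 256 = 63.
 */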

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement of X from Y using the increment
                 * operation, we need to add UINT32_MAX X times to Y.
                 * Each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
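
/*
 * Worked example (illustrative): to decrement the sequence number by 3,
 * value = 3 * UINT32_MAX = 0x2FFFFFFFD; truncated to 32 bits this is
 * 0xFFFFFFFD, and adding it modulo 2^32 subtracts 3 from the field.
 */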

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement of X from Y using the increment
                 * operation, we need to add UINT32_MAX X times to Y.
                 * Each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there is
                         * no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
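
/*
 * Worked example (illustrative): with dv_regc0_mask = 0x0000ff00 on a
 * little-endian host, copying to REG_C_0 uses
 * reg_dst.offset = rte_bsf32(0x0000ff00) = 8 and
 * mask = 0x0000ff00 << (32 - rte_fls_u32(0x0000ff00)) = 0xff000000,
 * confining the copy to the eight available bits of reg_c[0].
 */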

/**
 * Convert MARK action to DV specification. This routine is used
 * with extensive metadata only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
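
/*
 * Worked example (illustrative): if reg_c[0] reports the mask
 * 0xffff0000, then shl_c0 = rte_bsf32(0xffff0000) = 16 and the mark
 * value and mask are moved up by 16 bits into the usable half of the
 * register before the modify-header action is built.
 */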

/**
 * Get the metadata register index for the specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        /*
         * In datapath code there are no endianness
         * conversions, for performance reasons; all
         * pattern conversions are done in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
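
/*
 * Worked example (illustrative): on a little-endian host,
 * dv_regc0_mask = 0xffff0000 gives shl_c0 = 32 - rte_fls_u32(mask) = 0,
 * while 0x0000ffff gives shl_c0 = 16; data and mask are pre-shifted so
 * that, once stored as big-endian, they align with the reserved
 * reg_c[0] bits.
 */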

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
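
/*
 * Worked example (illustrative): RTE_IPV4_HDR_DSCP_MASK is 0xFC (DSCP
 * occupies the upper six bits of the Type of Service byte), so the mask
 * written here is 0xFC >> 2 = 0x3F, matching the 6-bit DSCP value
 * passed in conf->dscp.
 */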

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bits offset of IPv6 is not byte aligned,
         * rdma-core only accepts the DSCP bits byte-aligned, starting from
         * bit 0 to 5, to be compatible with IPv4. No need to shift the
         * bits in the IPv6 case, as rdma-core requires a byte-aligned value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
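
/*
 * Worked example (illustrative): RTE_IPV6_HDR_DSCP_MASK is 0x0FC00000
 * within vtc_flow, so the mask written here is 0x0FC00000 >> 22 = 0x3F,
 * i.e. the same byte-aligned 6-bit DSCP layout as in the IPv4 case
 * above.
 */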
1326
1327 /**
1328  * Validate MARK item.
1329  *
1330  * @param[in] dev
1331  *   Pointer to the rte_eth_dev structure.
1332  * @param[in] item
1333  *   Item specification.
1334  * @param[in] attr
1335  *   Attributes of flow that includes this item.
1336  * @param[out] error
1337  *   Pointer to error structure.
1338  *
1339  * @return
1340  *   0 on success, a negative errno value otherwise and rte_errno is set.
1341  */
1342 static int
1343 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1344                            const struct rte_flow_item *item,
1345                            const struct rte_flow_attr *attr __rte_unused,
1346                            struct rte_flow_error *error)
1347 {
1348         struct mlx5_priv *priv = dev->data->dev_private;
1349         struct mlx5_dev_config *config = &priv->config;
1350         const struct rte_flow_item_mark *spec = item->spec;
1351         const struct rte_flow_item_mark *mask = item->mask;
1352         const struct rte_flow_item_mark nic_mask = {
1353                 .id = priv->sh->dv_mark_mask,
1354         };
1355         int ret;
1356
1357         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1358                 return rte_flow_error_set(error, ENOTSUP,
1359                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1360                                           "extended metadata feature"
1361                                           " isn't enabled");
1362         if (!mlx5_flow_ext_mreg_supported(dev))
1363                 return rte_flow_error_set(error, ENOTSUP,
1364                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1365                                           "extended metadata register"
1366                                           " isn't supported");
1367         if (!nic_mask.id)
1368                 return rte_flow_error_set(error, ENOTSUP,
1369                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1370                                           "extended metadata register"
1371                                           " isn't available");
1372         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1373         if (ret < 0)
1374                 return ret;
1375         if (!spec)
1376                 return rte_flow_error_set(error, EINVAL,
1377                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1378                                           item->spec,
1379                                           "data cannot be empty");
1380         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1381                 return rte_flow_error_set(error, EINVAL,
1382                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1383                                           &spec->id,
1384                                           "mark id exceeds the limit");
1385         if (!mask)
1386                 mask = &nic_mask;
1387         if (!mask->id)
1388                 return rte_flow_error_set(error, EINVAL,
1389                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1390                                         "mask cannot be zero");
1391
1392         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1393                                         (const uint8_t *)&nic_mask,
1394                                         sizeof(struct rte_flow_item_mark),
1395                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1396         if (ret < 0)
1397                 return ret;
1398         return 0;
1399 }
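
/*
 * Illustrative sketch, not part of the driver: a MARK pattern item that
 * satisfies the checks above - a non-NULL spec, an id below
 * (MLX5_FLOW_MARK_MAX & dv_mark_mask) and a non-zero mask. Matching on
 * MARK also requires dv_xmeta_en set to a non-legacy mode. Names are
 * hypothetical.
 */
static const struct rte_flow_item_mark example_mark_spec __rte_unused = {
	.id = 0xcafe,
};
static const struct rte_flow_item_mark example_mark_mask __rte_unused = {
	.id = 0xffff,
};
static const struct rte_flow_item example_mark_pattern[] __rte_unused = {
	{ .type = RTE_FLOW_ITEM_TYPE_MARK,
	  .spec = &example_mark_spec, .mask = &example_mark_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};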
1400
1401 /**
1402  * Validate META item.
1403  *
1404  * @param[in] dev
1405  *   Pointer to the rte_eth_dev structure.
1406  * @param[in] item
1407  *   Item specification.
1408  * @param[in] attr
1409  *   Attributes of flow that includes this item.
1410  * @param[out] error
1411  *   Pointer to error structure.
1412  *
1413  * @return
1414  *   0 on success, a negative errno value otherwise and rte_errno is set.
1415  */
1416 static int
1417 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1418                            const struct rte_flow_item *item,
1419                            const struct rte_flow_attr *attr,
1420                            struct rte_flow_error *error)
1421 {
1422         struct mlx5_priv *priv = dev->data->dev_private;
1423         struct mlx5_dev_config *config = &priv->config;
1424         const struct rte_flow_item_meta *spec = item->spec;
1425         const struct rte_flow_item_meta *mask = item->mask;
1426         struct rte_flow_item_meta nic_mask = {
1427                 .data = UINT32_MAX
1428         };
1429         int reg;
1430         int ret;
1431
1432         if (!spec)
1433                 return rte_flow_error_set(error, EINVAL,
1434                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1435                                           item->spec,
1436                                           "data cannot be empty");
1437         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1438                 if (!mlx5_flow_ext_mreg_supported(dev))
1439                         return rte_flow_error_set(error, ENOTSUP,
1440                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1441                                           "extended metadata register"
1442                                           " isn't supported");
1443                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1444                 if (reg < 0)
1445                         return reg;
1446                 if (reg == REG_NON)
1447                         return rte_flow_error_set(error, ENOTSUP,
1448                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1449                                         "unavailable extended metadata register");
1450                 if (reg == REG_B)
1451                         return rte_flow_error_set(error, ENOTSUP,
1452                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1453                                           "match on reg_b "
1454                                           "isn't supported");
1455                 if (reg != REG_A)
1456                         nic_mask.data = priv->sh->dv_meta_mask;
1457         } else if (attr->transfer) {
1458                 return rte_flow_error_set(error, ENOTSUP,
1459                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1460                                         "extended metadata feature "
1461                                         "should be enabled when "
1462                                         "meta item is requested "
1463                                         "with e-switch mode");
1464         }
1465         if (!mask)
1466                 mask = &rte_flow_item_meta_mask;
1467         if (!mask->data)
1468                 return rte_flow_error_set(error, EINVAL,
1469                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1470                                         "mask cannot be zero");
1471
1472         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1473                                         (const uint8_t *)&nic_mask,
1474                                         sizeof(struct rte_flow_item_meta),
1475                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1476         return ret;
1477 }
1478
1479 /**
1480  * Validate TAG item.
1481  *
1482  * @param[in] dev
1483  *   Pointer to the rte_eth_dev structure.
1484  * @param[in] item
1485  *   Item specification.
1486  * @param[in] attr
1487  *   Attributes of flow that includes this item.
1488  * @param[out] error
1489  *   Pointer to error structure.
1490  *
1491  * @return
1492  *   0 on success, a negative errno value otherwise and rte_errno is set.
1493  */
1494 static int
1495 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1496                           const struct rte_flow_item *item,
1497                           const struct rte_flow_attr *attr __rte_unused,
1498                           struct rte_flow_error *error)
1499 {
1500         const struct rte_flow_item_tag *spec = item->spec;
1501         const struct rte_flow_item_tag *mask = item->mask;
1502         const struct rte_flow_item_tag nic_mask = {
1503                 .data = RTE_BE32(UINT32_MAX),
1504                 .index = 0xff,
1505         };
1506         int ret;
1507
1508         if (!mlx5_flow_ext_mreg_supported(dev))
1509                 return rte_flow_error_set(error, ENOTSUP,
1510                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1511                                           "extended metadata register"
1512                                           " isn't supported");
1513         if (!spec)
1514                 return rte_flow_error_set(error, EINVAL,
1515                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1516                                           item->spec,
1517                                           "data cannot be empty");
1518         if (!mask)
1519                 mask = &rte_flow_item_tag_mask;
1520         if (!mask->data)
1521                 return rte_flow_error_set(error, EINVAL,
1522                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1523                                         "mask cannot be zero");
1524
1525         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1526                                         (const uint8_t *)&nic_mask,
1527                                         sizeof(struct rte_flow_item_tag),
1528                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1529         if (ret < 0)
1530                 return ret;
1531         if (mask->index != 0xff)
1532                 return rte_flow_error_set(error, EINVAL,
1533                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1534                                           "partial mask for tag index"
1535                                           " is not supported");
1536         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1537         if (ret < 0)
1538                 return ret;
1539         MLX5_ASSERT(ret != REG_NON);
1540         return 0;
1541 }
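
/*
 * Illustrative sketch, not part of the driver: a TAG item as validated
 * above. The index mask must be the full 0xff - partial index masks are
 * rejected - while the data mask only has to be non-zero. Names are
 * hypothetical.
 */
static const struct rte_flow_item_tag example_tag_spec __rte_unused = {
	.data = 0x1234,
	.index = 3,
};
static const struct rte_flow_item_tag example_tag_mask __rte_unused = {
	.data = 0xffffffff,
	.index = 0xff,
};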
1542
1543 /**
1544  * Validate port ID item.
1545  *
1546  * @param[in] dev
1547  *   Pointer to the rte_eth_dev structure.
1548  * @param[in] item
1549  *   Item specification.
1550  * @param[in] attr
1551  *   Attributes of flow that includes this item.
1552  * @param[in] item_flags
1553  *   Bit-fields that holds the items detected until now.
1554  * @param[out] error
1555  *   Pointer to error structure.
1556  *
1557  * @return
1558  *   0 on success, a negative errno value otherwise and rte_errno is set.
1559  */
1560 static int
1561 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1562                               const struct rte_flow_item *item,
1563                               const struct rte_flow_attr *attr,
1564                               uint64_t item_flags,
1565                               struct rte_flow_error *error)
1566 {
1567         const struct rte_flow_item_port_id *spec = item->spec;
1568         const struct rte_flow_item_port_id *mask = item->mask;
1569         const struct rte_flow_item_port_id switch_mask = {
1570                         .id = 0xffffffff,
1571         };
1572         struct mlx5_priv *esw_priv;
1573         struct mlx5_priv *dev_priv;
1574         int ret;
1575
1576         if (!attr->transfer)
1577                 return rte_flow_error_set(error, EINVAL,
1578                                           RTE_FLOW_ERROR_TYPE_ITEM,
1579                                           NULL,
1580                                           "match on port id is valid only"
1581                                           " when transfer flag is enabled");
1582         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1583                 return rte_flow_error_set(error, ENOTSUP,
1584                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1585                                           "multiple source ports are not"
1586                                           " supported");
1587         if (!mask)
1588                 mask = &switch_mask;
1589         if (mask->id != 0xffffffff)
1590                 return rte_flow_error_set(error, ENOTSUP,
1591                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1592                                            mask,
1593                                            "no support for partial mask on"
1594                                            " \"id\" field");
1595         ret = mlx5_flow_item_acceptable
1596                                 (item, (const uint8_t *)mask,
1597                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1598                                  sizeof(struct rte_flow_item_port_id),
1599                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1600         if (ret)
1601                 return ret;
1602         if (!spec)
1603                 return 0;
1604         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1605         if (!esw_priv)
1606                 return rte_flow_error_set(error, rte_errno,
1607                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1608                                           "failed to obtain E-Switch info for"
1609                                           " port");
1610         dev_priv = mlx5_dev_to_eswitch_info(dev);
1611         if (!dev_priv)
1612                 return rte_flow_error_set(error, rte_errno,
1613                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1614                                           NULL,
1615                                           "failed to obtain E-Switch info");
1616         if (esw_priv->domain_id != dev_priv->domain_id)
1617                 return rte_flow_error_set(error, EINVAL,
1618                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1619                                           "cannot match on a port from a"
1620                                           " different E-Switch");
1621         return 0;
1622 }
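
/*
 * Illustrative sketch, not part of the driver: matching on a source port
 * is only valid with attr.transfer = 1, and the "id" mask, when given,
 * must be the full 0xffffffff as enforced above. Names are hypothetical.
 */
static const struct rte_flow_attr example_transfer_attr __rte_unused = {
	.group = 1,
	.transfer = 1,
};
static const struct rte_flow_item_port_id example_port_spec __rte_unused = {
	.id = 0, /* DPDK port id of the E-Switch port to match. */
};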
1623
1624 /**
1625  * Validate VLAN item.
1626  *
1627  * @param[in] item
1628  *   Item specification.
1629  * @param[in] item_flags
1630  *   Bit-fields that holds the items detected until now.
1631  * @param[in] dev
1632  *   Ethernet device flow is being created on.
1633  * @param[out] error
1634  *   Pointer to error structure.
1635  *
1636  * @return
1637  *   0 on success, a negative errno value otherwise and rte_errno is set.
1638  */
1639 static int
1640 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1641                            uint64_t item_flags,
1642                            struct rte_eth_dev *dev,
1643                            struct rte_flow_error *error)
1644 {
1645         const struct rte_flow_item_vlan *mask = item->mask;
1646         const struct rte_flow_item_vlan nic_mask = {
1647                 .tci = RTE_BE16(UINT16_MAX),
1648                 .inner_type = RTE_BE16(UINT16_MAX),
1649                 .has_more_vlan = 1,
1650         };
1651         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1652         int ret;
1653         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1654                                         MLX5_FLOW_LAYER_INNER_L4) :
1655                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1656                                         MLX5_FLOW_LAYER_OUTER_L4);
1657         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1658                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1659
1660         if (item_flags & vlanm)
1661                 return rte_flow_error_set(error, EINVAL,
1662                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1663                                           "multiple VLAN layers not supported");
1664         else if ((item_flags & l34m) != 0)
1665                 return rte_flow_error_set(error, EINVAL,
1666                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1667                                           "VLAN cannot follow L3/L4 layer");
1668         if (!mask)
1669                 mask = &rte_flow_item_vlan_mask;
1670         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1671                                         (const uint8_t *)&nic_mask,
1672                                         sizeof(struct rte_flow_item_vlan),
1673                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1674         if (ret)
1675                 return ret;
1676         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1677                 struct mlx5_priv *priv = dev->data->dev_private;
1678
1679                 if (priv->vmwa_context) {
1680                         /*
1681                          * A non-NULL context means a virtual machine with
1682                          * SR-IOV enabled; we have to create a VLAN interface
1683                          * to make the hypervisor set up the E-Switch vport
1684                          * context correctly. We avoid creating multiple VLAN
1685                          * interfaces, so we cannot support a VLAN tag mask.
1686                          */
1687                         return rte_flow_error_set(error, EINVAL,
1688                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1689                                                   item,
1690                                                   "VLAN tag mask is not"
1691                                                   " supported in virtual"
1692                                                   " environment");
1693                 }
1694         }
1695         return 0;
1696 }
1697
1698 /*
1699  * GTP flags are contained in 1 byte of the format:
1700  * -------------------------------------------
1701  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1702  * |-----------------------------------------|
1703  * | value | Version | PT | Res | E | S | PN |
1704  * -------------------------------------------
1705  *
1706  * Matching is supported only for GTP flags E, S, PN.
1707  */
1708 #define MLX5_GTP_FLAGS_MASK     0x07
1709
1710 /**
1711  * Validate GTP item.
1712  *
1713  * @param[in] dev
1714  *   Pointer to the rte_eth_dev structure.
1715  * @param[in] item
1716  *   Item specification.
1717  * @param[in] item_flags
1718  *   Bit-fields that holds the items detected until now.
1719  * @param[out] error
1720  *   Pointer to error structure.
1721  *
1722  * @return
1723  *   0 on success, a negative errno value otherwise and rte_errno is set.
1724  */
1725 static int
1726 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1727                           const struct rte_flow_item *item,
1728                           uint64_t item_flags,
1729                           struct rte_flow_error *error)
1730 {
1731         struct mlx5_priv *priv = dev->data->dev_private;
1732         const struct rte_flow_item_gtp *spec = item->spec;
1733         const struct rte_flow_item_gtp *mask = item->mask;
1734         const struct rte_flow_item_gtp nic_mask = {
1735                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1736                 .msg_type = 0xff,
1737                 .teid = RTE_BE32(0xffffffff),
1738         };
1739
1740         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1741                 return rte_flow_error_set(error, ENOTSUP,
1742                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1743                                           "GTP support is not enabled");
1744         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1745                 return rte_flow_error_set(error, ENOTSUP,
1746                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1747                                           "multiple tunnel layers not"
1748                                           " supported");
1749         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1750                 return rte_flow_error_set(error, EINVAL,
1751                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1752                                           "no outer UDP layer found");
1753         if (!mask)
1754                 mask = &rte_flow_item_gtp_mask;
1755         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1756                 return rte_flow_error_set(error, ENOTSUP,
1757                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1758                                           "Match is supported for GTP"
1759                                           " flags only");
1760         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1761                                          (const uint8_t *)&nic_mask,
1762                                          sizeof(struct rte_flow_item_gtp),
1763                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1764 }
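
/*
 * Illustrative sketch, not part of the driver: matching the GTP S flag
 * (bit 1 of the flags byte). Only the E/S/PN bits covered by
 * MLX5_GTP_FLAGS_MASK (0x07) may be matched, and an outer UDP layer must
 * precede the GTP item. Names are hypothetical.
 */
static const struct rte_flow_item_gtp example_gtp_spec __rte_unused = {
	.v_pt_rsv_flags = 0x02, /* S = 1. */
	.teid = RTE_BE32(1234),
};
static const struct rte_flow_item_gtp example_gtp_mask __rte_unused = {
	.v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
	.teid = RTE_BE32(0xffffffff),
};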
1765
1766 /**
1767  * Validate GTP PSC item.
1768  *
1769  * @param[in] item
1770  *   Item specification.
1771  * @param[in] last_item
1772  *   Previous validated item in the pattern items.
1773  * @param[in] gtp_item
1774  *   Previous GTP item specification.
1775  * @param[in] attr
1776  *   Pointer to flow attributes.
1777  * @param[out] error
1778  *   Pointer to error structure.
1779  *
1780  * @return
1781  *   0 on success, a negative errno value otherwise and rte_errno is set.
1782  */
1783 static int
1784 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
1785                               uint64_t last_item,
1786                               const struct rte_flow_item *gtp_item,
1787                               const struct rte_flow_attr *attr,
1788                               struct rte_flow_error *error)
1789 {
1790         const struct rte_flow_item_gtp *gtp_spec;
1791         const struct rte_flow_item_gtp *gtp_mask;
1792         const struct rte_flow_item_gtp_psc *spec;
1793         const struct rte_flow_item_gtp_psc *mask;
1794         const struct rte_flow_item_gtp_psc nic_mask = {
1795                 .pdu_type = 0xFF,
1796                 .qfi = 0xFF,
1797         };
1798
1799         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
1800                 return rte_flow_error_set
1801                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
1802                          "GTP PSC item must be preceded with GTP item");
1803         gtp_spec = gtp_item->spec;
1804         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
1805         /* GTP spec is present and the E flag is requested to match zero. */
1806         if (gtp_spec &&
1807                 (gtp_mask->v_pt_rsv_flags &
1808                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
1809                 return rte_flow_error_set
1810                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
1811                          "GTP E flag must be 1 to match GTP PSC");
1812         /* Check the flow is not created in group zero. */
1813         if (!attr->transfer && !attr->group)
1814                 return rte_flow_error_set
1815                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1816                          "GTP PSC is not supported for group 0");
1817         /* The GTP PSC spec is optional; nothing more to validate without it. */
1818         if (!item->spec)
1819                 return 0;
1820         spec = item->spec;
1821         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
1822         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
1823                 return rte_flow_error_set
1824                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
1825                          "PDU type should be smaller than 16");
1826         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1827                                          (const uint8_t *)&nic_mask,
1828                                          sizeof(struct rte_flow_item_gtp_psc),
1829                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1830 }
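
/*
 * Illustrative sketch, not part of the driver: GTP PSC must directly
 * follow a GTP item whose E flag may match 1, and the rule must not be
 * in group 0 unless it is a transfer rule. Names are hypothetical.
 */
static const struct rte_flow_item_gtp example_psc_gtp_spec __rte_unused = {
	.v_pt_rsv_flags = MLX5_GTP_EXT_HEADER_FLAG, /* E = 1. */
};
static const struct rte_flow_item_gtp_psc example_psc_spec __rte_unused = {
	.pdu_type = 0, /* Must not exceed MLX5_GTP_EXT_MAX_PDU_TYPE. */
	.qfi = 9,
};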
1831
1832 /**
1833  * Validate IPV4 item.
1834  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1835  * add specific validation of fragment_offset field,
1836  *
1837  * @param[in] item
1838  *   Item specification.
1839  * @param[in] item_flags
1840  *   Bit-fields that holds the items detected until now.
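 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (inner or outer).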
1841  * @param[out] error
1842  *   Pointer to error structure.
1843  *
1844  * @return
1845  *   0 on success, a negative errno value otherwise and rte_errno is set.
1846  */
1847 static int
1848 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1849                            uint64_t item_flags,
1850                            uint64_t last_item,
1851                            uint16_t ether_type,
1852                            struct rte_flow_error *error)
1853 {
1854         int ret;
1855         const struct rte_flow_item_ipv4 *spec = item->spec;
1856         const struct rte_flow_item_ipv4 *last = item->last;
1857         const struct rte_flow_item_ipv4 *mask = item->mask;
1858         rte_be16_t fragment_offset_spec = 0;
1859         rte_be16_t fragment_offset_last = 0;
1860         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1861                 .hdr = {
1862                         .src_addr = RTE_BE32(0xffffffff),
1863                         .dst_addr = RTE_BE32(0xffffffff),
1864                         .type_of_service = 0xff,
1865                         .fragment_offset = RTE_BE16(0xffff),
1866                         .next_proto_id = 0xff,
1867                         .time_to_live = 0xff,
1868                 },
1869         };
1870
1871         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1872                                            ether_type, &nic_ipv4_mask,
1873                                            MLX5_ITEM_RANGE_ACCEPTED, error);
1874         if (ret < 0)
1875                 return ret;
1876         if (spec && mask)
1877                 fragment_offset_spec = spec->hdr.fragment_offset &
1878                                        mask->hdr.fragment_offset;
1879         if (!fragment_offset_spec)
1880                 return 0;
1881         /*
1882          * spec and mask are valid, enforce using full mask to make sure the
1883          * complete value is used correctly.
1884          */
1885         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1886                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1887                 return rte_flow_error_set(error, EINVAL,
1888                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1889                                           item, "must use full mask for"
1890                                           " fragment_offset");
1891         /*
1892          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1893          * indicating this is 1st fragment of fragmented packet.
1894          * This is not yet supported in MLX5, return appropriate error message.
1895          */
1896         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1897                 return rte_flow_error_set(error, ENOTSUP,
1898                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1899                                           "match on first fragment not "
1900                                           "supported");
1901         if (fragment_offset_spec && !last)
1902                 return rte_flow_error_set(error, ENOTSUP,
1903                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1904                                           "specified value not supported");
1905         /* spec and last are valid, validate the specified range. */
1906         fragment_offset_last = last->hdr.fragment_offset &
1907                                mask->hdr.fragment_offset;
1908         /*
1909          * Match on fragment_offset spec 0x2001 and last 0x3fff
1910          * means MF is 1 and frag-offset is > 0.
1911          * This packet is fragment 2nd and onward, excluding last.
1912          * This is not yet supported in MLX5, return appropriate
1913          * error message.
1914          */
1915         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1916             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1917                 return rte_flow_error_set(error, ENOTSUP,
1918                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1919                                           last, "match on following "
1920                                           "fragments not supported");
1921         /*
1922          * Match on fragment_offset spec 0x0001 and last 0x1fff
1923          * means MF is 0 and frag-offset is > 0.
1924          * This packet is last fragment of fragmented packet.
1925          * This is not yet supported in MLX5, return appropriate
1926          * error message.
1927          */
1928         if (fragment_offset_spec == RTE_BE16(1) &&
1929             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1930                 return rte_flow_error_set(error, ENOTSUP,
1931                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1932                                           last, "match on last "
1933                                           "fragment not supported");
1934         /*
1935          * Match on fragment_offset spec 0x0001 and last 0x3fff
1936          * means MF and/or frag-offset is not 0.
1937          * This is a fragmented packet.
1938          * Other range values are invalid and rejected.
1939          */
1940         if (!(fragment_offset_spec == RTE_BE16(1) &&
1941               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1942                 return rte_flow_error_set(error, ENOTSUP,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1944                                           "specified range not supported");
1945         return 0;
1946 }
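
/*
 * Illustrative sketch, not part of the driver: the only accepted
 * fragment_offset range - spec 0x0001 with last 0x3fff under the full
 * 0x3fff mask (MLX5_IPV4_FRAG_OFFSET_MASK), i.e. "any fragmented packet"
 * (MF and/or offset non-zero). Names are hypothetical.
 */
static const struct rte_flow_item_ipv4 example_frag_spec __rte_unused = {
	.hdr = { .fragment_offset = RTE_BE16(0x0001) },
};
static const struct rte_flow_item_ipv4 example_frag_last __rte_unused = {
	.hdr = { .fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK) },
};
static const struct rte_flow_item_ipv4 example_frag_mask __rte_unused = {
	.hdr = { .fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK) },
};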
1947
1948 /**
1949  * Validate IPV6 fragment extension item.
1950  *
1951  * @param[in] item
1952  *   Item specification.
1953  * @param[in] item_flags
1954  *   Bit-fields that holds the items detected until now.
1955  * @param[out] error
1956  *   Pointer to error structure.
1957  *
1958  * @return
1959  *   0 on success, a negative errno value otherwise and rte_errno is set.
1960  */
1961 static int
1962 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1963                                     uint64_t item_flags,
1964                                     struct rte_flow_error *error)
1965 {
1966         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1967         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1968         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1969         rte_be16_t frag_data_spec = 0;
1970         rte_be16_t frag_data_last = 0;
1971         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1972         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1973                                       MLX5_FLOW_LAYER_OUTER_L4;
1974         int ret = 0;
1975         struct rte_flow_item_ipv6_frag_ext nic_mask = {
1976                 .hdr = {
1977                         .next_header = 0xff,
1978                         .frag_data = RTE_BE16(0xffff),
1979                 },
1980         };
1981
1982         if (item_flags & l4m)
1983                 return rte_flow_error_set(error, EINVAL,
1984                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1985                                           "ipv6 fragment extension item cannot "
1986                                           "follow L4 item.");
1987         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1988             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1989                 return rte_flow_error_set(error, EINVAL,
1990                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1991                                           "ipv6 fragment extension item must "
1992                                           "follow ipv6 item");
1993         if (spec && mask)
1994                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1995         if (!frag_data_spec)
1996                 return 0;
1997         /*
1998          * spec and mask are valid, enforce using full mask to make sure the
1999          * complete value is used correctly.
2000          */
2001         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2002                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2003                 return rte_flow_error_set(error, EINVAL,
2004                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2005                                           item, "must use full mask for"
2006                                           " frag_data");
2007         /*
2008          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2009          * This is 1st fragment of fragmented packet.
2010          */
2011         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2012                 return rte_flow_error_set(error, ENOTSUP,
2013                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2014                                           "match on first fragment not "
2015                                           "supported");
2016         if (frag_data_spec && !last)
2017                 return rte_flow_error_set(error, EINVAL,
2018                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2019                                           "specified value not supported");
2020         ret = mlx5_flow_item_acceptable
2021                                 (item, (const uint8_t *)mask,
2022                                  (const uint8_t *)&nic_mask,
2023                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2024                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2025         if (ret)
2026                 return ret;
2027         /* spec and last are valid, validate the specified range. */
2028         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2029         /*
2030          * Match on frag_data spec 0x0009 and last 0xfff9
2031          * means M is 1 and frag-offset is > 0.
2032          * This packet is fragment 2nd and onward, excluding last.
2033          * This is not yet supported in MLX5, return appropriate
2034          * error message.
2035          */
2036         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2037                                        RTE_IPV6_EHDR_MF_MASK) &&
2038             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2039                 return rte_flow_error_set(error, ENOTSUP,
2040                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2041                                           last, "match on following "
2042                                           "fragments not supported");
2043         /*
2044          * Match on frag_data spec 0x0008 and last 0xfff8
2045          * means M is 0 and frag-offset is > 0.
2046          * This packet is last fragment of fragmented packet.
2047          * This is not yet supported in MLX5, return appropriate
2048          * error message.
2049          */
2050         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2051             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2052                 return rte_flow_error_set(error, ENOTSUP,
2053                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2054                                           last, "match on last "
2055                                           "fragment not supported");
2056         /* Other range values are invalid and rejected. */
2057         return rte_flow_error_set(error, EINVAL,
2058                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2059                                   "specified range not supported");
2060 }
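
/*
 * Illustrative sketch, not part of the driver: how frag_data packs the
 * fields checked above - the 13-bit fragment offset in bits 15..3 and
 * the M (more fragments) flag in bit 0. The helper name is hypothetical.
 */
static inline rte_be16_t
example_ipv6_frag_data(uint16_t offset_8byte_units, int more_frags)
{
	return rte_cpu_to_be_16
		((uint16_t)(offset_8byte_units << RTE_IPV6_EHDR_FO_SHIFT) |
		 (more_frags ? RTE_IPV6_EHDR_MF_MASK : 0));
}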
2061
2062 /**
2063  * Validate the pop VLAN action.
2064  *
2065  * @param[in] dev
2066  *   Pointer to the rte_eth_dev structure.
2067  * @param[in] action_flags
2068  *   Holds the actions detected until now.
2069  * @param[in] action
2070  *   Pointer to the pop vlan action.
2071  * @param[in] item_flags
2072  *   The items found in this flow rule.
2073  * @param[in] attr
2074  *   Pointer to flow attributes.
2075  * @param[out] error
2076  *   Pointer to error structure.
2077  *
2078  * @return
2079  *   0 on success, a negative errno value otherwise and rte_errno is set.
2080  */
2081 static int
2082 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2083                                  uint64_t action_flags,
2084                                  const struct rte_flow_action *action,
2085                                  uint64_t item_flags,
2086                                  const struct rte_flow_attr *attr,
2087                                  struct rte_flow_error *error)
2088 {
2089         const struct mlx5_priv *priv = dev->data->dev_private;
2090
2093         if (!priv->sh->pop_vlan_action)
2094                 return rte_flow_error_set(error, ENOTSUP,
2095                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2096                                           NULL,
2097                                           "pop vlan action is not supported");
2098         if (attr->egress)
2099                 return rte_flow_error_set(error, ENOTSUP,
2100                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2101                                           NULL,
2102                                           "pop vlan action not supported for "
2103                                           "egress");
2104         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2105                 return rte_flow_error_set(error, ENOTSUP,
2106                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2107                                           "no support for multiple VLAN "
2108                                           "actions");
2109         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2110         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2111             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2112                 return rte_flow_error_set(error, ENOTSUP,
2113                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2114                                           NULL,
2115                                           "cannot pop vlan after decap without "
2116                                           "match on inner vlan in the flow");
2117         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2118         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2119             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2120                 return rte_flow_error_set(error, ENOTSUP,
2121                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2122                                           NULL,
2123                                           "cannot pop vlan without a "
2124                                           "match on (outer) vlan in the flow");
2125         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2126                 return rte_flow_error_set(error, EINVAL,
2127                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2128                                           "wrong action order, port_id should "
2129                                           "be after pop VLAN action");
2130         if (!attr->transfer && priv->representor)
2131                 return rte_flow_error_set(error, ENOTSUP,
2132                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2133                                           "pop vlan action for VF representor "
2134                                           "not supported on NIC table");
2135         return 0;
2136 }
2137
2138 /**
2139  * Get VLAN default info from vlan match info.
2140  *
2141  * @param[in] items
2142  *   The list of item specifications.
2143  * @param[out] vlan
2144  *   Pointer to the VLAN info to fill in.
2148  */
2149 static void
2150 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2151                                   struct rte_vlan_hdr *vlan)
2152 {
2153         const struct rte_flow_item_vlan nic_mask = {
2154                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2155                                 MLX5DV_FLOW_VLAN_VID_MASK),
2156                 .inner_type = RTE_BE16(0xffff),
2157         };
2158
2159         if (items == NULL)
2160                 return;
2161         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2162                 int type = items->type;
2163
2164                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2165                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2166                         break;
2167         }
2168         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2169                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2170                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2171
2172                 /* If VLAN item in pattern doesn't contain data, return here. */
2173                 if (!vlan_v)
2174                         return;
2175                 if (!vlan_m)
2176                         vlan_m = &nic_mask;
2177                 /* Only full match values are accepted. */
2178                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2179                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2180                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2181                         vlan->vlan_tci |=
2182                                 rte_be_to_cpu_16(vlan_v->tci &
2183                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2184                 }
2185                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2186                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2187                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2188                         vlan->vlan_tci |=
2189                                 rte_be_to_cpu_16(vlan_v->tci &
2190                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2191                 }
2192                 if (vlan_m->inner_type == nic_mask.inner_type)
2193                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2194                                                            vlan_m->inner_type);
2195         }
2196 }
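
/*
 * Illustrative sketch, not part of the driver: decomposing a host-order
 * TCI with the same masks used above - PCP in bits 15..13 and VID in
 * bits 11..0. The helper name is hypothetical.
 */
static inline void
example_split_vlan_tci(uint16_t tci, uint8_t *pcp, uint16_t *vid)
{
	*pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >> MLX5DV_FLOW_VLAN_PCP_SHIFT;
	*vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;
}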
2197
2198 /**
2199  * Validate the push VLAN action.
2200  *
2201  * @param[in] dev
2202  *   Pointer to the rte_eth_dev structure.
2203  * @param[in] action_flags
2204  *   Holds the actions detected until now.
2205  * @param[in] vlan_m
2206  *   VLAN item mask from the flow pattern, or NULL if none.
2207  * @param[in] action
2208  *   Pointer to the action structure.
2209  * @param[in] attr
2210  *   Pointer to flow attributes
2211  * @param[out] error
2212  *   Pointer to error structure.
2213  *
2214  * @return
2215  *   0 on success, a negative errno value otherwise and rte_errno is set.
2216  */
2217 static int
2218 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2219                                   uint64_t action_flags,
2220                                   const struct rte_flow_item_vlan *vlan_m,
2221                                   const struct rte_flow_action *action,
2222                                   const struct rte_flow_attr *attr,
2223                                   struct rte_flow_error *error)
2224 {
2225         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2226         const struct mlx5_priv *priv = dev->data->dev_private;
2227
2228         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2229             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2230                 return rte_flow_error_set(error, EINVAL,
2231                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2232                                           "invalid vlan ethertype");
2233         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2234                 return rte_flow_error_set(error, EINVAL,
2235                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2236                                           "wrong action order, port_id should "
2237                                           "be after push VLAN");
2238         if (!attr->transfer && priv->representor)
2239                 return rte_flow_error_set(error, ENOTSUP,
2240                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2241                                           "push vlan action for VF representor "
2242                                           "not supported on NIC table");
2243         if (vlan_m &&
2244             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2245             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2246                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2247             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2248             !(mlx5_flow_find_action
2249                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2250                 return rte_flow_error_set(error, EINVAL,
2251                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2252                                           "not full match mask on VLAN PCP and "
2253                                           "there is no of_set_vlan_pcp action, "
2254                                           "push VLAN action cannot figure out "
2255                                           "PCP value");
2256         if (vlan_m &&
2257             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2258             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2259                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2260             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2261             !(mlx5_flow_find_action
2262                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2263                 return rte_flow_error_set(error, EINVAL,
2264                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2265                                           "not full match mask on VLAN VID and "
2266                                           "there is no of_set_vlan_vid action, "
2267                                           "push VLAN action cannot figure out "
2268                                           "VID value");
2270         return 0;
2271 }
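
/*
 * Illustrative sketch, not part of the driver: when the pattern does not
 * pin PCP and VID with full masks, push VLAN has to be followed by
 * explicit of_set_vlan_pcp/of_set_vlan_vid actions, as the checks above
 * require. Names are hypothetical.
 */
static const struct rte_flow_action_of_push_vlan example_push __rte_unused = {
	.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
static const struct rte_flow_action_of_set_vlan_pcp example_pcp __rte_unused = {
	.vlan_pcp = 3, /* Must not exceed 7. */
};
static const struct rte_flow_action_of_set_vlan_vid example_vid __rte_unused = {
	.vlan_vid = RTE_BE16(100), /* Must not exceed 0xFFE. */
};
static const struct rte_flow_action example_push_vlan_actions[] __rte_unused = {
	{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &example_push },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &example_pcp },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &example_vid },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};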
2272
2273 /**
2274  * Validate the set VLAN PCP.
2275  *
2276  * @param[in] action_flags
2277  *   Holds the actions detected until now.
2278  * @param[in] actions
2279  *   Pointer to the list of actions remaining in the flow rule.
2280  * @param[out] error
2281  *   Pointer to error structure.
2282  *
2283  * @return
2284  *   0 on success, a negative errno value otherwise and rte_errno is set.
2285  */
2286 static int
2287 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2288                                      const struct rte_flow_action actions[],
2289                                      struct rte_flow_error *error)
2290 {
2291         const struct rte_flow_action *action = actions;
2292         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2293
2294         if (conf->vlan_pcp > 7)
2295                 return rte_flow_error_set(error, EINVAL,
2296                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2297                                           "VLAN PCP value is too big");
2298         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2299                 return rte_flow_error_set(error, ENOTSUP,
2300                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2301                                           "set VLAN PCP action must follow "
2302                                           "the push VLAN action");
2303         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2304                 return rte_flow_error_set(error, ENOTSUP,
2305                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2306                                           "Multiple VLAN PCP modifications are "
2307                                           "not supported");
2308         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2309                 return rte_flow_error_set(error, EINVAL,
2310                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2311                                           "wrong action order, port_id should "
2312                                           "be after set VLAN PCP");
2313         return 0;
2314 }
2315
2316 /**
2317  * Validate the set VLAN VID.
2318  *
2319  * @param[in] item_flags
2320  *   Holds the items detected in this rule.
2321  * @param[in] action_flags
2322  *   Holds the actions detected until now.
2323  * @param[in] actions
2324  *   Pointer to the list of actions remaining in the flow rule.
2325  * @param[out] error
2326  *   Pointer to error structure.
2327  *
2328  * @return
2329  *   0 on success, a negative errno value otherwise and rte_errno is set.
2330  */
2331 static int
2332 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2333                                      uint64_t action_flags,
2334                                      const struct rte_flow_action actions[],
2335                                      struct rte_flow_error *error)
2336 {
2337         const struct rte_flow_action *action = actions;
2338         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2339
2340         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2341                 return rte_flow_error_set(error, EINVAL,
2342                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2343                                           "VLAN VID value is too big");
2344         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2345             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2346                 return rte_flow_error_set(error, ENOTSUP,
2347                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2348                                           "set VLAN VID action must follow push"
2349                                           " VLAN action or match on VLAN item");
2350         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2351                 return rte_flow_error_set(error, ENOTSUP,
2352                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2353                                           "Multiple VLAN VID modifications are "
2354                                           "not supported");
2355         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2356                 return rte_flow_error_set(error, EINVAL,
2357                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2358                                           "wrong action order, port_id should "
2359                                           "be after set VLAN VID");
2360         return 0;
2361 }
2362
2363 /**
2364  * Validate the FLAG action.
2365  *
2366  * @param[in] dev
2367  *   Pointer to the rte_eth_dev structure.
2368  * @param[in] action_flags
2369  *   Holds the actions detected until now.
2370  * @param[in] attr
2371  *   Pointer to flow attributes
2372  * @param[out] error
2373  *   Pointer to error structure.
2374  *
2375  * @return
2376  *   0 on success, a negative errno value otherwise and rte_errno is set.
2377  */
2378 static int
2379 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2380                              uint64_t action_flags,
2381                              const struct rte_flow_attr *attr,
2382                              struct rte_flow_error *error)
2383 {
2384         struct mlx5_priv *priv = dev->data->dev_private;
2385         struct mlx5_dev_config *config = &priv->config;
2386         int ret;
2387
2388         /* Fall back if no extended metadata register support. */
2389         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2390                 return mlx5_flow_validate_action_flag(action_flags, attr,
2391                                                       error);
2392         /* Extended metadata mode requires registers. */
2393         if (!mlx5_flow_ext_mreg_supported(dev))
2394                 return rte_flow_error_set(error, ENOTSUP,
2395                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2396                                           "no metadata registers "
2397                                           "to support flag action");
2398         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2399                 return rte_flow_error_set(error, ENOTSUP,
2400                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2401                                           "extended metadata register"
2402                                           " isn't available");
2403         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2404         if (ret < 0)
2405                 return ret;
2406         MLX5_ASSERT(ret > 0);
2407         if (action_flags & MLX5_FLOW_ACTION_MARK)
2408                 return rte_flow_error_set(error, EINVAL,
2409                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2410                                           "can't mark and flag in same flow");
2411         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2412                 return rte_flow_error_set(error, EINVAL,
2413                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2414                                           "can't have 2 flag"
2415                                           " actions in same flow");
2416         return 0;
2417 }
2418
2419 /**
2420  * Validate MARK action.
2421  *
2422  * @param[in] dev
2423  *   Pointer to the rte_eth_dev structure.
2424  * @param[in] action
2425  *   Pointer to action.
2426  * @param[in] action_flags
2427  *   Holds the actions detected until now.
2428  * @param[in] attr
2429  *   Pointer to flow attributes
2430  * @param[out] error
2431  *   Pointer to error structure.
2432  *
2433  * @return
2434  *   0 on success, a negative errno value otherwise and rte_errno is set.
2435  */
2436 static int
2437 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2438                              const struct rte_flow_action *action,
2439                              uint64_t action_flags,
2440                              const struct rte_flow_attr *attr,
2441                              struct rte_flow_error *error)
2442 {
2443         struct mlx5_priv *priv = dev->data->dev_private;
2444         struct mlx5_dev_config *config = &priv->config;
2445         const struct rte_flow_action_mark *mark = action->conf;
2446         int ret;
2447
2448         if (is_tunnel_offload_active(dev))
2449                 return rte_flow_error_set(error, ENOTSUP,
2450                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2451                                           "no mark action "
2452                                           "if tunnel offload active");
2453         /* Fall back if no extended metadata register support. */
2454         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2455                 return mlx5_flow_validate_action_mark(action, action_flags,
2456                                                       attr, error);
2457         /* Extended metadata mode requires registers. */
2458         if (!mlx5_flow_ext_mreg_supported(dev))
2459                 return rte_flow_error_set(error, ENOTSUP,
2460                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2461                                           "no metadata registers "
2462                                           "to support mark action");
2463         if (!priv->sh->dv_mark_mask)
2464                 return rte_flow_error_set(error, ENOTSUP,
2465                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2466                                           "extended metadata register"
2467                                           " isn't available");
2468         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2469         if (ret < 0)
2470                 return ret;
2471         MLX5_ASSERT(ret > 0);
2472         if (!mark)
2473                 return rte_flow_error_set(error, EINVAL,
2474                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2475                                           "configuration cannot be null");
2476         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2477                 return rte_flow_error_set(error, EINVAL,
2478                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2479                                           &mark->id,
2480                                           "mark id exceeds the limit");
2481         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2482                 return rte_flow_error_set(error, EINVAL,
2483                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2484                                           "can't flag and mark in same flow");
2485         if (action_flags & MLX5_FLOW_ACTION_MARK)
2486                 return rte_flow_error_set(error, EINVAL,
2487                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2488                                           "can't have 2 mark actions in same"
2489                                           " flow");
2490         return 0;
2491 }
2492
2493 /**
2494  * Validate SET_META action.
2495  *
2496  * @param[in] dev
2497  *   Pointer to the rte_eth_dev structure.
2498  * @param[in] action
2499  *   Pointer to the action structure.
2500  * @param[in] action_flags
2501  *   Holds the actions detected until now.
2502  * @param[in] attr
2503  *   Pointer to flow attributes
2504  * @param[out] error
2505  *   Pointer to error structure.
2506  *
2507  * @return
2508  *   0 on success, a negative errno value otherwise and rte_errno is set.
2509  */
2510 static int
2511 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2512                                  const struct rte_flow_action *action,
2513                                  uint64_t action_flags __rte_unused,
2514                                  const struct rte_flow_attr *attr,
2515                                  struct rte_flow_error *error)
2516 {
2517         const struct rte_flow_action_set_meta *conf;
2518         uint32_t nic_mask = UINT32_MAX;
2519         int reg;
2520
2521         if (!mlx5_flow_ext_mreg_supported(dev))
2522                 return rte_flow_error_set(error, ENOTSUP,
2523                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2524                                           "extended metadata register"
2525                                           " isn't supported");
2526         reg = flow_dv_get_metadata_reg(dev, attr, error);
2527         if (reg < 0)
2528                 return reg;
2529         if (reg == REG_NON)
2530                 return rte_flow_error_set(error, ENOTSUP,
2531                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2532                                           "unavailable extended metadata register");
2533         if (reg != REG_A && reg != REG_B) {
2534                 struct mlx5_priv *priv = dev->data->dev_private;
2535
2536                 nic_mask = priv->sh->dv_meta_mask;
2537         }
2538         if (!(action->conf))
2539                 return rte_flow_error_set(error, EINVAL,
2540                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2541                                           "configuration cannot be null");
2542         conf = (const struct rte_flow_action_set_meta *)action->conf;
2543         if (!conf->mask)
2544                 return rte_flow_error_set(error, EINVAL,
2545                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2546                                           "zero mask doesn't have any effect");
2547         if (conf->mask & ~nic_mask)
2548                 return rte_flow_error_set(error, EINVAL,
2549                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2550                                           "metadata must be within reg C0");
2551         return 0;
2552 }
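
/*
 * Example (illustrative only): a SET_META action that passes the checks
 * above when extensive metadata mode is enabled. The data/mask values are
 * hypothetical; the mask must be non-zero and, when metadata is mapped to
 * a REG_C register, a subset of the probed dv_meta_mask:
 *
 *     struct rte_flow_action_set_meta meta_conf = {
 *             .data = 0x1234,
 *             .mask = 0xffff,
 *     };
 */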
2553
2554 /**
2555  * Validate SET_TAG action.
2556  *
2557  * @param[in] dev
2558  *   Pointer to the rte_eth_dev structure.
2559  * @param[in] action
2560  *   Pointer to the action structure.
2561  * @param[in] action_flags
2562  *   Holds the actions detected until now.
2563  * @param[in] attr
2564  *   Pointer to flow attributes
2565  * @param[out] error
2566  *   Pointer to error structure.
2567  *
2568  * @return
2569  *   0 on success, a negative errno value otherwise and rte_errno is set.
2570  */
2571 static int
2572 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2573                                 const struct rte_flow_action *action,
2574                                 uint64_t action_flags,
2575                                 const struct rte_flow_attr *attr,
2576                                 struct rte_flow_error *error)
2577 {
2578         const struct rte_flow_action_set_tag *conf;
2579         const uint64_t terminal_action_flags =
2580                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2581                 MLX5_FLOW_ACTION_RSS;
2582         int ret;
2583
2584         if (!mlx5_flow_ext_mreg_supported(dev))
2585                 return rte_flow_error_set(error, ENOTSUP,
2586                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2587                                           "extensive metadata register"
2588                                           " isn't supported");
2589         if (!(action->conf))
2590                 return rte_flow_error_set(error, EINVAL,
2591                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2592                                           "configuration cannot be null");
2593         conf = (const struct rte_flow_action_set_tag *)action->conf;
2594         if (!conf->mask)
2595                 return rte_flow_error_set(error, EINVAL,
2596                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2597                                           "zero mask doesn't have any effect");
2598         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2599         if (ret < 0)
2600                 return ret;
2601         if (!attr->transfer && attr->ingress &&
2602             (action_flags & terminal_action_flags))
2603                 return rte_flow_error_set(error, EINVAL,
2604                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2605                                           "set_tag has no effect"
2606                                           " with terminal actions");
2607         return 0;
2608 }
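
/*
 * Example (illustrative only): a SET_TAG action as validated above. The
 * index selects one of the REG_C application tag registers through
 * mlx5_flow_get_reg_id(); the data/mask values are hypothetical:
 *
 *     struct rte_flow_action_set_tag tag_conf = {
 *             .data = 0xbeef,
 *             .mask = 0xffff,
 *             .index = 0,
 *     };
 */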
2609
2610 /**
2611  * Validate count action.
2612  *
2613  * @param[in] dev
2614  *   Pointer to rte_eth_dev structure.
2615  * @param[in] action_flags
2616  *   Holds the actions detected until now.
2617  * @param[out] error
2618  *   Pointer to error structure.
2619  *
2620  * @return
2621  *   0 on success, a negative errno value otherwise and rte_errno is set.
2622  */
2623 static int
2624 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2625                               uint64_t action_flags,
2626                               struct rte_flow_error *error)
2627 {
2628         struct mlx5_priv *priv = dev->data->dev_private;
2629
2630         if (action_flags & MLX5_FLOW_ACTION_COUNT)
2631                 return rte_flow_error_set(error, EINVAL,
2632                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2633                                           "duplicate count actions set");
2634         if (!priv->config.devx)
2635                 goto notsup_err;
2636 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2637         return 0;
2638 #endif
2639 notsup_err:
2640         return rte_flow_error_set
2641                       (error, ENOTSUP,
2642                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2643                        NULL,
2644                        "count action not supported");
2645 }
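
/*
 * Example (illustrative only): COUNT needs DevX counter support and may
 * appear at most once per flow, so the minimal valid usage is simply:
 *
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = NULL },
 *             { .type = RTE_FLOW_ACTION_TYPE_DROP, .conf = NULL },
 *             { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
 *     };
 */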
2646
2647 /**
2648  * Validate the L2 encap action.
2649  *
2650  * @param[in] dev
2651  *   Pointer to the rte_eth_dev structure.
2652  * @param[in] action_flags
2653  *   Holds the actions detected until now.
2654  * @param[in] action
2655  *   Pointer to the action structure.
2656  * @param[in] attr
2657  *   Pointer to flow attributes.
2658  * @param[out] error
2659  *   Pointer to error structure.
2660  *
2661  * @return
2662  *   0 on success, a negative errno value otherwise and rte_errno is set.
2663  */
2664 static int
2665 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2666                                  uint64_t action_flags,
2667                                  const struct rte_flow_action *action,
2668                                  const struct rte_flow_attr *attr,
2669                                  struct rte_flow_error *error)
2670 {
2671         const struct mlx5_priv *priv = dev->data->dev_private;
2672
2673         if (!(action->conf))
2674                 return rte_flow_error_set(error, EINVAL,
2675                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2676                                           "configuration cannot be null");
2677         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2678                 return rte_flow_error_set(error, EINVAL,
2679                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2680                                           "can only have a single encap action "
2681                                           "in a flow");
2682         if (!attr->transfer && priv->representor)
2683                 return rte_flow_error_set(error, ENOTSUP,
2684                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2685                                           "encap action for VF representor "
2686                                           "not supported on NIC table");
2687         return 0;
2688 }
2689
2690 /**
2691  * Validate a decap action.
2692  *
2693  * @param[in] dev
2694  *   Pointer to the rte_eth_dev structure.
2695  * @param[in] action_flags
2696  *   Holds the actions detected until now.
2697  * @param[in] action
2698  *   Pointer to the action structure.
2699  * @param[in] item_flags
2700  *   Holds the items detected.
2701  * @param[in] attr
2702  *   Pointer to flow attributes
2703  * @param[out] error
2704  *   Pointer to error structure.
2705  *
2706  * @return
2707  *   0 on success, a negative errno value otherwise and rte_errno is set.
2708  */
2709 static int
2710 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2711                               uint64_t action_flags,
2712                               const struct rte_flow_action *action,
2713                               const uint64_t item_flags,
2714                               const struct rte_flow_attr *attr,
2715                               struct rte_flow_error *error)
2716 {
2717         const struct mlx5_priv *priv = dev->data->dev_private;
2718
2719         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2720             !priv->config.decap_en)
2721                 return rte_flow_error_set(error, ENOTSUP,
2722                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2723                                           "decap is not enabled");
2724         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2725                 return rte_flow_error_set(error, ENOTSUP,
2726                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2727                                           action_flags &
2728                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2729                                           "have a single decap action" : "decap "
2730                                           "after encap is not supported");
2731         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2732                 return rte_flow_error_set(error, EINVAL,
2733                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2734                                           "can't have decap action after"
2735                                           " modify action");
2736         if (attr->egress)
2737                 return rte_flow_error_set(error, ENOTSUP,
2738                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2739                                           NULL,
2740                                           "decap action not supported for "
2741                                           "egress");
2742         if (!attr->transfer && priv->representor)
2743                 return rte_flow_error_set(error, ENOTSUP,
2744                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2745                                           "decap action for VF representor "
2746                                           "not supported on NIC table");
2747         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
2748             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
2749                 return rte_flow_error_set(error, ENOTSUP,
2750                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2751                                 "VXLAN item should be present for VXLAN decap");
2752         return 0;
2753 }
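
/*
 * Example (illustrative only, testpmd-style syntax): VXLAN_DECAP requires
 * the pattern to actually match on VXLAN, otherwise the last check above
 * rejects the flow:
 *
 *     pattern eth / ipv4 / udp / vxlan  actions vxlan_decap / queue  -> OK
 *     pattern eth / ipv4 / udp          actions vxlan_decap / queue  -> ENOTSUP
 */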
2754
2755 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2756
2757 /**
2758  * Validate the raw encap and decap actions.
2759  *
2760  * @param[in] dev
2761  *   Pointer to the rte_eth_dev structure.
2762  * @param[in] decap
2763  *   Pointer to the decap action.
2764  * @param[in] encap
2765  *   Pointer to the encap action.
2766  * @param[in] attr
2767  *   Pointer to flow attributes
2768  * @param[in, out] action_flags
2769  *   Holds the actions detected until now.
2770  * @param[in, out] actions_n
2771  *   Pointer to the number of actions counter.
2772  * @param[in] action
2773  *   Pointer to the action structure.
2774  * @param[in] item_flags
2775  *   Holds the items detected.
2776  * @param[out] error
2777  *   Pointer to error structure.
2778  *
2779  * @return
2780  *   0 on success, a negative errno value otherwise and rte_errno is set.
2781  */
2782 static int
2783 flow_dv_validate_action_raw_encap_decap
2784         (struct rte_eth_dev *dev,
2785          const struct rte_flow_action_raw_decap *decap,
2786          const struct rte_flow_action_raw_encap *encap,
2787          const struct rte_flow_attr *attr, uint64_t *action_flags,
2788          int *actions_n, const struct rte_flow_action *action,
2789          uint64_t item_flags, struct rte_flow_error *error)
2790 {
2791         const struct mlx5_priv *priv = dev->data->dev_private;
2792         int ret;
2793
2794         if (encap && (!encap->size || !encap->data))
2795                 return rte_flow_error_set(error, EINVAL,
2796                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2797                                           "raw encap data cannot be empty");
2798         if (decap && encap) {
2799                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2800                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2801                         /* L3 encap. */
2802                         decap = NULL;
2803                 else if (encap->size <=
2804                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2805                            decap->size >
2806                            MLX5_ENCAPSULATION_DECISION_SIZE)
2807                         /* L3 decap. */
2808                         encap = NULL;
2809                 else if (encap->size >
2810                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2811                            decap->size >
2812                            MLX5_ENCAPSULATION_DECISION_SIZE)
2813                         /* 2 L2 actions: encap and decap. */
2814                         ;
2815                 else
2816                         return rte_flow_error_set(error,
2817                                 ENOTSUP,
2818                                 RTE_FLOW_ERROR_TYPE_ACTION,
2819                                 NULL, "unsupported too small "
2820                                 "raw decap and too small raw "
2821                                 "encap combination");
2822         }
2823         if (decap) {
2824                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
2825                                                     item_flags, attr, error);
2826                 if (ret < 0)
2827                         return ret;
2828                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2829                 ++(*actions_n);
2830         }
2831         if (encap) {
2832                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2833                         return rte_flow_error_set(error, ENOTSUP,
2834                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2835                                                   NULL,
2836                                                   "small raw encap size");
2837                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2838                         return rte_flow_error_set(error, EINVAL,
2839                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2840                                                   NULL,
2841                                                   "more than one encap action");
2842                 if (!attr->transfer && priv->representor)
2843                         return rte_flow_error_set
2844                                         (error, ENOTSUP,
2845                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2846                                          "encap action for VF representor "
2847                                          "not supported on NIC table");
2848                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2849                 ++(*actions_n);
2850         }
2851         return 0;
2852 }
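
/*
 * Example (illustrative only): a RAW_DECAP + RAW_ENCAP pair classified by
 * the size checks above as an L3 encap: the decap strips only the inner
 * Ethernet header (size <= MLX5_ENCAPSULATION_DECISION_SIZE) while the
 * encap prepends a complete outer stack (size above the threshold). The
 * buffers and the encap size are hypothetical:
 *
 *     struct rte_flow_action_raw_decap decap_conf = {
 *             .data = inner_eth_hdr,
 *             .size = sizeof(struct rte_ether_hdr),
 *     };
 *     struct rte_flow_action_raw_encap encap_conf = {
 *             .data = outer_tunnel_hdr,
 *             .size = 50,
 *     };
 */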
2853
2854 /**
2855  * Match encap_decap resource.
2856  *
2857  * @param list
2858  *   Pointer to the hash list.
2859  * @param entry
2860  *   Pointer to the existing resource entry object.
2861  * @param key
2862  *   Key of the new entry.
2863  * @param cb_ctx
2864  *   Pointer to the context holding the new encap_decap resource.
2865  *
2866  * @return
2867  *   0 on match, non-zero otherwise.
2868  */
2869 int
2870 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2871                              struct mlx5_hlist_entry *entry,
2872                              uint64_t key __rte_unused, void *cb_ctx)
2873 {
2874         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2875         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2876         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2877
2878         cache_resource = container_of(entry,
2879                                       struct mlx5_flow_dv_encap_decap_resource,
2880                                       entry);
2881         if (resource->reformat_type == cache_resource->reformat_type &&
2882             resource->ft_type == cache_resource->ft_type &&
2883             resource->flags == cache_resource->flags &&
2884             resource->size == cache_resource->size &&
2885             !memcmp((const void *)resource->buf,
2886                     (const void *)cache_resource->buf,
2887                     resource->size))
2888                 return 0;
2889         return -1;
2890 }
2891
2892 /**
2893  * Allocate encap_decap resource.
2894  *
2895  * @param list
2896  *   Pointer to the hash list.
2897  * @param key
2898  *   Key of the new entry.
2899  * @param cb_ctx
2900  *   Pointer to the context holding the new encap_decap resource.
2901  *
2902  * @return
2903  *   Pointer to the new hash list entry on success, NULL otherwise.
2904  */
2905 struct mlx5_hlist_entry *
2906 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2907                               uint64_t key __rte_unused,
2908                               void *cb_ctx)
2909 {
2910         struct mlx5_dev_ctx_shared *sh = list->ctx;
2911         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2912         struct mlx5dv_dr_domain *domain;
2913         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2914         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2915         uint32_t idx;
2916         int ret;
2917
2918         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2919                 domain = sh->fdb_domain;
2920         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2921                 domain = sh->rx_domain;
2922         else
2923                 domain = sh->tx_domain;
2924         /* Register new encap/decap resource. */
2925         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2926                                        &idx);
2927         if (!cache_resource) {
2928                 rte_flow_error_set(ctx->error, ENOMEM,
2929                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2930                                    "cannot allocate resource memory");
2931                 return NULL;
2932         }
2933         *cache_resource = *resource;
2934         cache_resource->idx = idx;
2935         ret = mlx5_flow_os_create_flow_action_packet_reformat
2936                                         (sh->ctx, domain, cache_resource,
2937                                          &cache_resource->action);
2938         if (ret) {
2939                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2940                 rte_flow_error_set(ctx->error, ENOMEM,
2941                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2942                                    NULL, "cannot create action");
2943                 return NULL;
2944         }
2945
2946         return &cache_resource->entry;
2947 }
2948
2949 /**
2950  * Find existing encap/decap resource or create and register a new one.
2951  *
2952  * @param[in, out] dev
2953  *   Pointer to rte_eth_dev structure.
2954  * @param[in, out] resource
2955  *   Pointer to encap/decap resource.
2956  * @param[in, out] dev_flow
2957  *   Pointer to the dev_flow.
2958  * @param[out] error
2959  *   Pointer to error structure.
2960  *
2961  * @return
2962  *   0 on success, a negative errno value otherwise and rte_errno is set.
2963  */
2964 static int
2965 flow_dv_encap_decap_resource_register
2966                         (struct rte_eth_dev *dev,
2967                          struct mlx5_flow_dv_encap_decap_resource *resource,
2968                          struct mlx5_flow *dev_flow,
2969                          struct rte_flow_error *error)
2970 {
2971         struct mlx5_priv *priv = dev->data->dev_private;
2972         struct mlx5_dev_ctx_shared *sh = priv->sh;
2973         struct mlx5_hlist_entry *entry;
2974         union {
2975                 struct {
2976                         uint32_t ft_type:8;
2977                         uint32_t refmt_type:8;
2978                         /*
2979                          * Header reformat actions can be shared between
2980                          * non-root tables. One bit to indicate non-root
2981                          * table or not.
2982                          */
2983                         uint32_t is_non_root:1;
2984                         uint32_t reserve:15;
2985                 };
2986                 uint32_t v32;
2987         } encap_decap_key = {
2988                 {
2989                         .ft_type = resource->ft_type,
2990                         .refmt_type = resource->reformat_type,
2991                         .is_non_root = !!dev_flow->dv.group,
2992                         .reserve = 0,
2993                 }
2994         };
2995         struct mlx5_flow_cb_ctx ctx = {
2996                 .error = error,
2997                 .data = resource,
2998         };
2999         uint64_t key64;
3000
3001         resource->flags = dev_flow->dv.group ? 0 : 1;
3002         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3003                                 sizeof(encap_decap_key.v32), 0);
3004         if (resource->reformat_type !=
3005             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3006             resource->size)
3007                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3008         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3009         if (!entry)
3010                 return -rte_errno;
3011         resource = container_of(entry, typeof(*resource), entry);
3012         dev_flow->dv.encap_decap = resource;
3013         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3014         return 0;
3015 }
3016
3017 /**
3018  * Find existing table jump resource or create and register a new one.
3019  *
3020  * @param[in, out] dev
3021  *   Pointer to rte_eth_dev structure.
3022  * @param[in, out] tbl
3023  *   Pointer to flow table resource.
3024  * @param[in, out] dev_flow
3025  *   Pointer to the dev_flow.
3026  * @param[out] error
3027  *   Pointer to error structure.
3028  *
3029  * @return
3030  *   0 on success, a negative errno value otherwise and rte_errno is set.
3031  */
3032 static int
3033 flow_dv_jump_tbl_resource_register
3034                         (struct rte_eth_dev *dev __rte_unused,
3035                          struct mlx5_flow_tbl_resource *tbl,
3036                          struct mlx5_flow *dev_flow,
3037                          struct rte_flow_error *error __rte_unused)
3038 {
3039         struct mlx5_flow_tbl_data_entry *tbl_data =
3040                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3041
3042         MLX5_ASSERT(tbl);
3043         MLX5_ASSERT(tbl_data->jump.action);
3044         dev_flow->handle->rix_jump = tbl_data->idx;
3045         dev_flow->dv.jump = &tbl_data->jump;
3046         return 0;
3047 }
3048
3049 int
3050 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3051                          struct mlx5_cache_entry *entry, void *cb_ctx)
3052 {
3053         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3054         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3055         struct mlx5_flow_dv_port_id_action_resource *res =
3056                         container_of(entry, typeof(*res), entry);
3057
3058         return ref->port_id != res->port_id;
3059 }
3060
3061 struct mlx5_cache_entry *
3062 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3063                           struct mlx5_cache_entry *entry __rte_unused,
3064                           void *cb_ctx)
3065 {
3066         struct mlx5_dev_ctx_shared *sh = list->ctx;
3067         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3068         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3069         struct mlx5_flow_dv_port_id_action_resource *cache;
3070         uint32_t idx;
3071         int ret;
3072
3073         /* Register new port id action resource. */
3074         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3075         if (!cache) {
3076                 rte_flow_error_set(ctx->error, ENOMEM,
3077                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3078                                    "cannot allocate port_id action cache memory");
3079                 return NULL;
3080         }
3081         *cache = *ref;
3082         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3083                                                         ref->port_id,
3084                                                         &cache->action);
3085         if (ret) {
3086                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3087                 rte_flow_error_set(ctx->error, ENOMEM,
3088                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3089                                    "cannot create action");
3090                 return NULL;
3091         }
3092         return &cache->entry;
3093 }
3094
3095 /**
3096  * Find existing table port ID resource or create and register a new one.
3097  *
3098  * @param[in, out] dev
3099  *   Pointer to rte_eth_dev structure.
3100  * @param[in, out] resource
3101  *   Pointer to port ID action resource.
3102  * @param[in, out] dev_flow
3103  *   Pointer to the dev_flow.
3104  * @param[out] error
3105  *   Pointer to error structure.
3106  *
3107  * @return
3108  *   0 on success, a negative errno value otherwise and rte_errno is set.
3109  */
3110 static int
3111 flow_dv_port_id_action_resource_register
3112                         (struct rte_eth_dev *dev,
3113                          struct mlx5_flow_dv_port_id_action_resource *resource,
3114                          struct mlx5_flow *dev_flow,
3115                          struct rte_flow_error *error)
3116 {
3117         struct mlx5_priv *priv = dev->data->dev_private;
3118         struct mlx5_cache_entry *entry;
3119         struct mlx5_flow_dv_port_id_action_resource *cache;
3120         struct mlx5_flow_cb_ctx ctx = {
3121                 .error = error,
3122                 .data = resource,
3123         };
3124
3125         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3126         if (!entry)
3127                 return -rte_errno;
3128         cache = container_of(entry, typeof(*cache), entry);
3129         dev_flow->dv.port_id_action = cache;
3130         dev_flow->handle->rix_port_id_action = cache->idx;
3131         return 0;
3132 }
3133
3134 int
3135 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3136                          struct mlx5_cache_entry *entry, void *cb_ctx)
3137 {
3138         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3139         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3140         struct mlx5_flow_dv_push_vlan_action_resource *res =
3141                         container_of(entry, typeof(*res), entry);
3142
3143         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3144 }
3145
3146 struct mlx5_cache_entry *
3147 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3148                           struct mlx5_cache_entry *entry __rte_unused,
3149                           void *cb_ctx)
3150 {
3151         struct mlx5_dev_ctx_shared *sh = list->ctx;
3152         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3153         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3154         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3155         struct mlx5dv_dr_domain *domain;
3156         uint32_t idx;
3157         int ret;
3158
3159         /* Register new push VLAN action resource. */
3160         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3161         if (!cache) {
3162                 rte_flow_error_set(ctx->error, ENOMEM,
3163                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3164                                    "cannot allocate push_vlan action cache memory");
3165                 return NULL;
3166         }
3167         *cache = *ref;
3168         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3169                 domain = sh->fdb_domain;
3170         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3171                 domain = sh->rx_domain;
3172         else
3173                 domain = sh->tx_domain;
3174         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3175                                                         &cache->action);
3176         if (ret) {
3177                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3178                 rte_flow_error_set(ctx->error, ENOMEM,
3179                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3180                                    "cannot create push vlan action");
3181                 return NULL;
3182         }
3183         return &cache->entry;
3184 }
3185
3186 /**
3187  * Find existing push vlan resource or create and register a new one.
3188  *
3189  * @param[in, out] dev
3190  *   Pointer to rte_eth_dev structure.
3191  * @param[in, out] resource
3192  *   Pointer to push VLAN action resource.
3193  * @param[in, out] dev_flow
3194  *   Pointer to the dev_flow.
3195  * @param[out] error
3196  *   Pointer to error structure.
3197  *
3198  * @return
3199  *   0 on success, a negative errno value otherwise and rte_errno is set.
3200  */
3201 static int
3202 flow_dv_push_vlan_action_resource_register
3203                        (struct rte_eth_dev *dev,
3204                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3205                         struct mlx5_flow *dev_flow,
3206                         struct rte_flow_error *error)
3207 {
3208         struct mlx5_priv *priv = dev->data->dev_private;
3209         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3210         struct mlx5_cache_entry *entry;
3211         struct mlx5_flow_cb_ctx ctx = {
3212                 .error = error,
3213                 .data = resource,
3214         };
3215
3216         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3217         if (!entry)
3218                 return -rte_errno;
3219         cache = container_of(entry, typeof(*cache), entry);
3220
3221         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3222         dev_flow->dv.push_vlan_res = cache;
3223         return 0;
3224 }
3225
3226 /**
3227  * Get the header size of the specified rte_flow_item_type.
3228  *
3229  * @param[in] item_type
3230  *   Tested rte_flow_item_type.
3231  *
3232  * @return
3233  *   Size of the item header structure in bytes, 0 if void or irrelevant.
3234  */
3235 static size_t
3236 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3237 {
3238         size_t retval;
3239
3240         switch (item_type) {
3241         case RTE_FLOW_ITEM_TYPE_ETH:
3242                 retval = sizeof(struct rte_ether_hdr);
3243                 break;
3244         case RTE_FLOW_ITEM_TYPE_VLAN:
3245                 retval = sizeof(struct rte_vlan_hdr);
3246                 break;
3247         case RTE_FLOW_ITEM_TYPE_IPV4:
3248                 retval = sizeof(struct rte_ipv4_hdr);
3249                 break;
3250         case RTE_FLOW_ITEM_TYPE_IPV6:
3251                 retval = sizeof(struct rte_ipv6_hdr);
3252                 break;
3253         case RTE_FLOW_ITEM_TYPE_UDP:
3254                 retval = sizeof(struct rte_udp_hdr);
3255                 break;
3256         case RTE_FLOW_ITEM_TYPE_TCP:
3257                 retval = sizeof(struct rte_tcp_hdr);
3258                 break;
3259         case RTE_FLOW_ITEM_TYPE_VXLAN:
3260         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3261                 retval = sizeof(struct rte_vxlan_hdr);
3262                 break;
3263         case RTE_FLOW_ITEM_TYPE_GRE:
3264         case RTE_FLOW_ITEM_TYPE_NVGRE:
3265                 retval = sizeof(struct rte_gre_hdr);
3266                 break;
3267         case RTE_FLOW_ITEM_TYPE_MPLS:
3268                 retval = sizeof(struct rte_mpls_hdr);
3269                 break;
3270         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3271         default:
3272                 retval = 0;
3273                 break;
3274         }
3275         return retval;
3276 }
3277
3278 #define MLX5_ENCAP_IPV4_VERSION         0x40
3279 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3280 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3281 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3282 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3283 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3284 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3285
3286 /**
3287  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
3288  *
3289  * @param[in] items
3290  *   Pointer to rte_flow_item objects list.
3291  * @param[out] buf
3292  *   Pointer to the output buffer.
3293  * @param[out] size
3294  *   Pointer to the output buffer size.
3295  * @param[out] error
3296  *   Pointer to the error structure.
3297  *
3298  * @return
3299  *   0 on success, a negative errno value otherwise and rte_errno is set.
3300  */
3301 static int
3302 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3303                            size_t *size, struct rte_flow_error *error)
3304 {
3305         struct rte_ether_hdr *eth = NULL;
3306         struct rte_vlan_hdr *vlan = NULL;
3307         struct rte_ipv4_hdr *ipv4 = NULL;
3308         struct rte_ipv6_hdr *ipv6 = NULL;
3309         struct rte_udp_hdr *udp = NULL;
3310         struct rte_vxlan_hdr *vxlan = NULL;
3311         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3312         struct rte_gre_hdr *gre = NULL;
3313         size_t len;
3314         size_t temp_size = 0;
3315
3316         if (!items)
3317                 return rte_flow_error_set(error, EINVAL,
3318                                           RTE_FLOW_ERROR_TYPE_ACTION,
3319                                           NULL, "invalid empty data");
3320         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3321                 len = flow_dv_get_item_hdr_len(items->type);
3322                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3323                         return rte_flow_error_set(error, EINVAL,
3324                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3325                                                   (void *)items->type,
3326                                                   "items total size is too big"
3327                                                   " for encap action");
3328                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3329                 switch (items->type) {
3330                 case RTE_FLOW_ITEM_TYPE_ETH:
3331                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3332                         break;
3333                 case RTE_FLOW_ITEM_TYPE_VLAN:
3334                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3335                         if (!eth)
3336                                 return rte_flow_error_set(error, EINVAL,
3337                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3338                                                 (void *)items->type,
3339                                                 "eth header not found");
3340                         if (!eth->ether_type)
3341                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3342                         break;
3343                 case RTE_FLOW_ITEM_TYPE_IPV4:
3344                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3345                         if (!vlan && !eth)
3346                                 return rte_flow_error_set(error, EINVAL,
3347                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3348                                                 (void *)items->type,
3349                                                 "neither eth nor vlan"
3350                                                 " header found");
3351                         if (vlan && !vlan->eth_proto)
3352                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3353                         else if (eth && !eth->ether_type)
3354                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3355                         if (!ipv4->version_ihl)
3356                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3357                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3358                         if (!ipv4->time_to_live)
3359                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3360                         break;
3361                 case RTE_FLOW_ITEM_TYPE_IPV6:
3362                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3363                         if (!vlan && !eth)
3364                                 return rte_flow_error_set(error, EINVAL,
3365                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3366                                                 (void *)items->type,
3367                                                 "neither eth nor vlan"
3368                                                 " header found");
3369                         if (vlan && !vlan->eth_proto)
3370                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3371                         else if (eth && !eth->ether_type)
3372                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3373                         if (!ipv6->vtc_flow)
3374                                 ipv6->vtc_flow =
3375                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3376                         if (!ipv6->hop_limits)
3377                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3378                         break;
3379                 case RTE_FLOW_ITEM_TYPE_UDP:
3380                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3381                         if (!ipv4 && !ipv6)
3382                                 return rte_flow_error_set(error, EINVAL,
3383                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3384                                                 (void *)items->type,
3385                                                 "ip header not found");
3386                         if (ipv4 && !ipv4->next_proto_id)
3387                                 ipv4->next_proto_id = IPPROTO_UDP;
3388                         else if (ipv6 && !ipv6->proto)
3389                                 ipv6->proto = IPPROTO_UDP;
3390                         break;
3391                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3392                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3393                         if (!udp)
3394                                 return rte_flow_error_set(error, EINVAL,
3395                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3396                                                 (void *)items->type,
3397                                                 "udp header not found");
3398                         if (!udp->dst_port)
3399                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3400                         if (!vxlan->vx_flags)
3401                                 vxlan->vx_flags =
3402                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3403                         break;
3404                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3405                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3406                         if (!udp)
3407                                 return rte_flow_error_set(error, EINVAL,
3408                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3409                                                 (void *)items->type,
3410                                                 "udp header not found");
3411                         if (!vxlan_gpe->proto)
3412                                 return rte_flow_error_set(error, EINVAL,
3413                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3414                                                 (void *)items->type,
3415                                                 "next protocol not found");
3416                         if (!udp->dst_port)
3417                                 udp->dst_port =
3418                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3419                         if (!vxlan_gpe->vx_flags)
3420                                 vxlan_gpe->vx_flags =
3421                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3422                         break;
3423                 case RTE_FLOW_ITEM_TYPE_GRE:
3424                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3425                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3426                         if (!gre->proto)
3427                                 return rte_flow_error_set(error, EINVAL,
3428                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3429                                                 (void *)items->type,
3430                                                 "next protocol not found");
3431                         if (!ipv4 && !ipv6)
3432                                 return rte_flow_error_set(error, EINVAL,
3433                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3434                                                 (void *)items->type,
3435                                                 "ip header not found");
3436                         if (ipv4 && !ipv4->next_proto_id)
3437                                 ipv4->next_proto_id = IPPROTO_GRE;
3438                         else if (ipv6 && !ipv6->proto)
3439                                 ipv6->proto = IPPROTO_GRE;
3440                         break;
3441                 case RTE_FLOW_ITEM_TYPE_VOID:
3442                         break;
3443                 default:
3444                         return rte_flow_error_set(error, EINVAL,
3445                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3446                                                   (void *)items->type,
3447                                                   "unsupported item type");
3448                         break;
3449                 }
3450                 temp_size += len;
3451         }
3452         *size = temp_size;
3453         return 0;
3454 }
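
/*
 * Example (illustrative only): a VXLAN_ENCAP definition that the helper
 * above can serialize. Fields left unset in the specs are filled with the
 * defaults (EtherType, IPv4 version/IHL and TTL, UDP destination port,
 * VXLAN flags); the spec variables and err are assumed declared elsewhere:
 *
 *     const struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_END, .spec = NULL },
 *     };
 *     uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *     size_t size;
 *
 *     if (!flow_dv_convert_encap_data(items, buf, &size, &err))
 *             ... buf[0..size) now holds the packed header stack ...
 */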
3455
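/**
 * Zero the UDP checksum inside a built encap header buffer.
 *
 * The hardware recomputes the IPv4 checksum on encap, so IPv4 headers are
 * left untouched; for an IPv6/UDP outer header the stale checksum is
 * cleared instead (a zero UDP checksum is tolerated for tunnels). This
 * description is inferred from the logic below.
 *
 * @param[in, out] data
 *   Pointer to the raw encap header buffer.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */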
3456 static int
3457 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3458 {
3459         struct rte_ether_hdr *eth = NULL;
3460         struct rte_vlan_hdr *vlan = NULL;
3461         struct rte_ipv6_hdr *ipv6 = NULL;
3462         struct rte_udp_hdr *udp = NULL;
3463         char *next_hdr;
3464         uint16_t proto;
3465
3466         eth = (struct rte_ether_hdr *)data;
3467         next_hdr = (char *)(eth + 1);
3468         proto = rte_be_to_cpu_16(eth->ether_type);
3469
3470         /* VLAN skipping */
3471         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3472                 vlan = (struct rte_vlan_hdr *)next_hdr;
3473                 proto = rte_be_to_cpu_16(vlan->eth_proto);
3474                 next_hdr += sizeof(struct rte_vlan_hdr);
3475         }
3476
3477         /* HW calculates IPv4 checksum, no need to proceed. */
3478         if (proto == RTE_ETHER_TYPE_IPV4)
3479                 return 0;
3480
3481         /* Non-IPv4/IPv6 header, not supported. */
3482         if (proto != RTE_ETHER_TYPE_IPV6) {
3483                 return rte_flow_error_set(error, ENOTSUP,
3484                                           RTE_FLOW_ERROR_TYPE_ACTION,
3485                                           NULL, "Cannot offload non IPv4/IPv6");
3486         }
3487
3488         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3489
3490         /* Ignore non-UDP. */
3491         if (ipv6->proto != IPPROTO_UDP)
3492                 return 0;
3493
3494         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3495         udp->dgram_cksum = 0;
3496
3497         return 0;
3498 }
3499
3500 /**
3501  * Convert L2 encap action to DV specification.
3502  *
3503  * @param[in] dev
3504  *   Pointer to rte_eth_dev structure.
3505  * @param[in] action
3506  *   Pointer to action structure.
3507  * @param[in, out] dev_flow
3508  *   Pointer to the mlx5_flow.
3509  * @param[in] transfer
3510  *   Mark if the flow is E-Switch flow.
3511  * @param[out] error
3512  *   Pointer to the error structure.
3513  *
3514  * @return
3515  *   0 on success, a negative errno value otherwise and rte_errno is set.
3516  */
3517 static int
3518 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3519                                const struct rte_flow_action *action,
3520                                struct mlx5_flow *dev_flow,
3521                                uint8_t transfer,
3522                                struct rte_flow_error *error)
3523 {
3524         const struct rte_flow_item *encap_data;
3525         const struct rte_flow_action_raw_encap *raw_encap_data;
3526         struct mlx5_flow_dv_encap_decap_resource res = {
3527                 .reformat_type =
3528                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3529                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3530                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3531         };
3532
3533         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3534                 raw_encap_data =
3535                         (const struct rte_flow_action_raw_encap *)action->conf;
3536                 res.size = raw_encap_data->size;
3537                 memcpy(res.buf, raw_encap_data->data, res.size);
3538         } else {
3539                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3540                         encap_data =
3541                                 ((const struct rte_flow_action_vxlan_encap *)
3542                                                 action->conf)->definition;
3543                 else
3544                         encap_data =
3545                                 ((const struct rte_flow_action_nvgre_encap *)
3546                                                 action->conf)->definition;
3547                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3548                                                &res.size, error))
3549                         return -rte_errno;
3550         }
3551         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3552                 return -rte_errno;
3553         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3554                 return rte_flow_error_set(error, EINVAL,
3555                                           RTE_FLOW_ERROR_TYPE_ACTION,
3556                                           NULL, "can't create L2 encap action");
3557         return 0;
3558 }
3559
3560 /**
3561  * Convert L2 decap action to DV specification.
3562  *
3563  * @param[in] dev
3564  *   Pointer to rte_eth_dev structure.
3565  * @param[in, out] dev_flow
3566  *   Pointer to the mlx5_flow.
3567  * @param[in] transfer
3568  *   Mark if the flow is E-Switch flow.
3569  * @param[out] error
3570  *   Pointer to the error structure.
3571  *
3572  * @return
3573  *   0 on success, a negative errno value otherwise and rte_errno is set.
3574  */
3575 static int
3576 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3577                                struct mlx5_flow *dev_flow,
3578                                uint8_t transfer,
3579                                struct rte_flow_error *error)
3580 {
3581         struct mlx5_flow_dv_encap_decap_resource res = {
3582                 .size = 0,
3583                 .reformat_type =
3584                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3585                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3586                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3587         };
3588
3589         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3590                 return rte_flow_error_set(error, EINVAL,
3591                                           RTE_FLOW_ERROR_TYPE_ACTION,
3592                                           NULL, "can't create L2 decap action");
3593         return 0;
3594 }
3595
3596 /**
3597  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3598  *
3599  * @param[in] dev
3600  *   Pointer to rte_eth_dev structure.
3601  * @param[in] action
3602  *   Pointer to action structure.
3603  * @param[in, out] dev_flow
3604  *   Pointer to the mlx5_flow.
3605  * @param[in] attr
3606  *   Pointer to the flow attributes.
3607  * @param[out] error
3608  *   Pointer to the error structure.
3609  *
3610  * @return
3611  *   0 on success, a negative errno value otherwise and rte_errno is set.
3612  */
3613 static int
3614 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3615                                 const struct rte_flow_action *action,
3616                                 struct mlx5_flow *dev_flow,
3617                                 const struct rte_flow_attr *attr,
3618                                 struct rte_flow_error *error)
3619 {
3620         const struct rte_flow_action_raw_encap *encap_data;
3621         struct mlx5_flow_dv_encap_decap_resource res;
3622
3623         memset(&res, 0, sizeof(res));
3624         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3625         res.size = encap_data->size;
3626         memcpy(res.buf, encap_data->data, res.size);
3627         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3628                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3629                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3630         if (attr->transfer)
3631                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3632         else
3633                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3634                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3635         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3636                 return rte_flow_error_set(error, EINVAL,
3637                                           RTE_FLOW_ERROR_TYPE_ACTION,
3638                                           NULL, "can't create encap action");
3639         return 0;
3640 }
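
/*
 * Note: the reformat type above is chosen purely by buffer size. A raw
 * buffer holding just an Ethernet header (below
 * MLX5_ENCAPSULATION_DECISION_SIZE) is treated as the L2 header to restore
 * after an L3 tunnel decap, while a larger buffer (a complete outer header
 * stack) selects the L2-to-L3 tunnel encap reformat.
 */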
3641
3642 /**
3643  * Create action push VLAN.
3644  *
3645  * @param[in] dev
3646  *   Pointer to rte_eth_dev structure.
3647  * @param[in] attr
3648  *   Pointer to the flow attributes.
3649  * @param[in] vlan
3650  *   Pointer to the vlan to push to the Ethernet header.
3651  * @param[in, out] dev_flow
3652  *   Pointer to the mlx5_flow.
3653  * @param[out] error
3654  *   Pointer to the error structure.
3655  *
3656  * @return
3657  *   0 on success, a negative errno value otherwise and rte_errno is set.
3658  */
3659 static int
3660 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3661                                 const struct rte_flow_attr *attr,
3662                                 const struct rte_vlan_hdr *vlan,
3663                                 struct mlx5_flow *dev_flow,
3664                                 struct rte_flow_error *error)
3665 {
3666         struct mlx5_flow_dv_push_vlan_action_resource res;
3667
3668         memset(&res, 0, sizeof(res));
3669         res.vlan_tag =
3670                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3671                                  vlan->vlan_tci);
3672         if (attr->transfer)
3673                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3674         else
3675                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3676                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3677         return flow_dv_push_vlan_action_resource_register
3678                                             (dev, &res, dev_flow, error);
3679 }
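
/*
 * Example (illustrative only): packing a TPID 0x8100 VLAN header with
 * PCP 5 and VID 100 into the 32-bit big-endian vlan_tag used above:
 *
 *     tci      = (5 << 13) | 100;                        -> 0xa064
 *     vlan_tag = rte_cpu_to_be_32(0x8100 << 16 | tci);   -> BE 0x8100a064
 */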
3680
3681 /**
3682  * Validate the modify-header actions.
3683  *
3684  * @param[in] action_flags
3685  *   Holds the actions detected until now.
3686  * @param[in] action
3687  *   Pointer to the modify action.
3688  * @param[out] error
3689  *   Pointer to error structure.
3690  *
3691  * @return
3692  *   0 on success, a negative errno value otherwise and rte_errno is set.
3693  */
3694 static int
3695 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3696                                    const struct rte_flow_action *action,
3697                                    struct rte_flow_error *error)
3698 {
3699         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3700                 return rte_flow_error_set(error, EINVAL,
3701                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3702                                           NULL, "action configuration not set");
3703         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3704                 return rte_flow_error_set(error, EINVAL,
3705                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3706                                           "can't have encap action before"
3707                                           " modify action");
3708         return 0;
3709 }
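
/*
 * Example (illustrative only, testpmd-style syntax): the ordering rule
 * above rejects a modify action placed after an encap, because by then the
 * modification would be applied on top of the freshly added outer headers:
 *
 *     actions raw_encap / set_ipv4_src ...  -> rejected (EINVAL)
 *     actions set_ipv4_src / raw_encap ...  -> accepted
 */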
3710
3711 /**
3712  * Validate the modify-header MAC address actions.
3713  *
3714  * @param[in] action_flags
3715  *   Holds the actions detected until now.
3716  * @param[in] action
3717  *   Pointer to the modify action.
3718  * @param[in] item_flags
3719  *   Holds the items detected.
3720  * @param[out] error
3721  *   Pointer to error structure.
3722  *
3723  * @return
3724  *   0 on success, a negative errno value otherwise and rte_errno is set.
3725  */
3726 static int
3727 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3728                                    const struct rte_flow_action *action,
3729                                    const uint64_t item_flags,
3730                                    struct rte_flow_error *error)
3731 {
3732         int ret = 0;
3733
3734         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3735         if (!ret) {
3736                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3737                         return rte_flow_error_set(error, EINVAL,
3738                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3739                                                   NULL,
3740                                                   "no L2 item in pattern");
3741         }
3742         return ret;
3743 }
3744
3745 /**
3746  * Validate the modify-header IPv4 address actions.
3747  *
3748  * @param[in] action_flags
3749  *   Holds the actions detected until now.
3750  * @param[in] action
3751  *   Pointer to the modify action.
3752  * @param[in] item_flags
3753  *   Holds the items detected.
3754  * @param[out] error
3755  *   Pointer to error structure.
3756  *
3757  * @return
3758  *   0 on success, a negative errno value otherwise and rte_errno is set.
3759  */
3760 static int
3761 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3762                                     const struct rte_flow_action *action,
3763                                     const uint64_t item_flags,
3764                                     struct rte_flow_error *error)
3765 {
3766         int ret = 0;
3767         uint64_t layer;
3768
3769         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3770         if (!ret) {
3771                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3772                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3773                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3774                 if (!(item_flags & layer))
3775                         return rte_flow_error_set(error, EINVAL,
3776                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3777                                                   NULL,
3778                                                   "no ipv4 item in pattern");
3779         }
3780         return ret;
3781 }
3782
3783 /**
3784  * Validate the modify-header IPv6 address actions.
3785  *
3786  * @param[in] action_flags
3787  *   Holds the actions detected until now.
3788  * @param[in] action
3789  *   Pointer to the modify action.
3790  * @param[in] item_flags
3791  *   Holds the items detected.
3792  * @param[out] error
3793  *   Pointer to error structure.
3794  *
3795  * @return
3796  *   0 on success, a negative errno value otherwise and rte_errno is set.
3797  */
3798 static int
3799 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3800                                     const struct rte_flow_action *action,
3801                                     const uint64_t item_flags,
3802                                     struct rte_flow_error *error)
3803 {
3804         int ret = 0;
3805         uint64_t layer;
3806
3807         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3808         if (!ret) {
3809                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3810                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3811                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3812                 if (!(item_flags & layer))
3813                         return rte_flow_error_set(error, EINVAL,
3814                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3815                                                   NULL,
3816                                                   "no ipv6 item in pattern");
3817         }
3818         return ret;
3819 }
3820
3821 /**
3822  * Validate the modify-header TP actions.
3823  *
3824  * @param[in] action_flags
3825  *   Holds the actions detected until now.
3826  * @param[in] action
3827  *   Pointer to the modify action.
3828  * @param[in] item_flags
3829  *   Holds the items detected.
3830  * @param[out] error
3831  *   Pointer to error structure.
3832  *
3833  * @return
3834  *   0 on success, a negative errno value otherwise and rte_errno is set.
3835  */
3836 static int
3837 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3838                                   const struct rte_flow_action *action,
3839                                   const uint64_t item_flags,
3840                                   struct rte_flow_error *error)
3841 {
3842         int ret = 0;
3843         uint64_t layer;
3844
3845         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3846         if (!ret) {
3847                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3848                                  MLX5_FLOW_LAYER_INNER_L4 :
3849                                  MLX5_FLOW_LAYER_OUTER_L4;
3850                 if (!(item_flags & layer))
3851                         return rte_flow_error_set(error, EINVAL,
3852                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3853                                                   NULL, "no transport layer "
3854                                                   "in pattern");
3855         }
3856         return ret;
3857 }
3858
3859 /**
3860  * Validate the modify-header actions of increment/decrement
3861  * TCP Sequence-number.
3862  *
3863  * @param[in] action_flags
3864  *   Holds the actions detected until now.
3865  * @param[in] action
3866  *   Pointer to the modify action.
3867  * @param[in] item_flags
3868  *   Holds the items detected.
3869  * @param[out] error
3870  *   Pointer to error structure.
3871  *
3872  * @return
3873  *   0 on success, a negative errno value otherwise and rte_errno is set.
3874  */
3875 static int
3876 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3877                                        const struct rte_flow_action *action,
3878                                        const uint64_t item_flags,
3879                                        struct rte_flow_error *error)
3880 {
3881         int ret = 0;
3882         uint64_t layer;
3883
3884         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3885         if (!ret) {
3886                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3887                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3888                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3889                 if (!(item_flags & layer))
3890                         return rte_flow_error_set(error, EINVAL,
3891                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3892                                                   NULL, "no TCP item in"
3893                                                   " pattern");
3894                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3895                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3896                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3897                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3898                         return rte_flow_error_set(error, EINVAL,
3899                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3900                                                   NULL,
3901                                                   "cannot decrease and increase"
3902                                                   " TCP sequence number"
3903                                                   " at the same time");
3904         }
3905         return ret;
3906 }
3907
3908 /**
3909  * Validate the modify-header actions of increment/decrement
3910  * TCP Acknowledgment number.
3911  *
3912  * @param[in] action_flags
3913  *   Holds the actions detected until now.
3914  * @param[in] action
3915  *   Pointer to the modify action.
3916  * @param[in] item_flags
3917  *   Holds the items detected.
3918  * @param[out] error
3919  *   Pointer to error structure.
3920  *
3921  * @return
3922  *   0 on success, a negative errno value otherwise and rte_errno is set.
3923  */
3924 static int
3925 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3926                                        const struct rte_flow_action *action,
3927                                        const uint64_t item_flags,
3928                                        struct rte_flow_error *error)
3929 {
3930         int ret = 0;
3931         uint64_t layer;
3932
3933         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3934         if (!ret) {
3935                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3936                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3937                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3938                 if (!(item_flags & layer))
3939                         return rte_flow_error_set(error, EINVAL,
3940                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3941                                                   NULL, "no TCP item in"
3942                                                   " pattern");
3943                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3944                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3945                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3946                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3947                         return rte_flow_error_set(error, EINVAL,
3948                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3949                                                   NULL,
3950                                                   "cannot decrease and increase"
3951                                                   " TCP acknowledgment number"
3952                                                   " at the same time");
3953         }
3954         return ret;
3955 }
3956
3957 /**
3958  * Validate the modify-header TTL actions.
3959  *
3960  * @param[in] action_flags
3961  *   Holds the actions detected until now.
3962  * @param[in] action
3963  *   Pointer to the modify action.
3964  * @param[in] item_flags
3965  *   Holds the items detected.
3966  * @param[out] error
3967  *   Pointer to error structure.
3968  *
3969  * @return
3970  *   0 on success, a negative errno value otherwise and rte_errno is set.
3971  */
3972 static int
3973 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3974                                    const struct rte_flow_action *action,
3975                                    const uint64_t item_flags,
3976                                    struct rte_flow_error *error)
3977 {
3978         int ret = 0;
3979         uint64_t layer;
3980
3981         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3982         if (!ret) {
3983                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3984                                  MLX5_FLOW_LAYER_INNER_L3 :
3985                                  MLX5_FLOW_LAYER_OUTER_L3;
3986                 if (!(item_flags & layer))
3987                         return rte_flow_error_set(error, EINVAL,
3988                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3989                                                   NULL,
3990                                                   "no IP protocol in pattern");
3991         }
3992         return ret;
3993 }
3994
3995 /**
3996  * Validate jump action.
3997  *
3998  * @param[in] dev
      *   Pointer to rte_eth_dev structure.
      * @param[in] tunnel
      *   Pointer to the tunnel context, NULL when no tunnel offload
      *   is involved.
      * @param[in] action
3999  *   Pointer to the jump action.
4000  * @param[in] action_flags
4001  *   Holds the actions detected until now.
4002  * @param[in] attributes
4003  *   Pointer to flow attributes
4004  * @param[in] external
4005  *   Action belongs to a flow rule created by a request external to the PMD.
4006  * @param[out] error
4007  *   Pointer to error structure.
4008  *
4009  * @return
4010  *   0 on success, a negative errno value otherwise and rte_errno is set.
4011  */
4012 static int
4013 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4014                              const struct mlx5_flow_tunnel *tunnel,
4015                              const struct rte_flow_action *action,
4016                              uint64_t action_flags,
4017                              const struct rte_flow_attr *attributes,
4018                              bool external, struct rte_flow_error *error)
4019 {
4020         uint32_t target_group, table;
4021         int ret = 0;
4022         struct flow_grp_info grp_info = {
4023                 .external = !!external,
4024                 .transfer = !!attributes->transfer,
4025                 .fdb_def_rule = 1,
4026                 .std_tbl_fix = 0
4027         };
4028         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4029                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4030                 return rte_flow_error_set(error, EINVAL,
4031                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4032                                           "can't have 2 fate actions in"
4033                                           " the same flow");
4034         if (action_flags & MLX5_FLOW_ACTION_METER)
4035                 return rte_flow_error_set(error, ENOTSUP,
4036                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4037                                           "jump with meter not supported");
4038         if (!action->conf)
4039                 return rte_flow_error_set(error, EINVAL,
4040                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4041                                           NULL, "action configuration not set");
4042         target_group =
4043                 ((const struct rte_flow_action_jump *)action->conf)->group;
4044         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4045                                        &grp_info, error);
4046         if (ret)
4047                 return ret;
4048         if (attributes->group == target_group &&
4049             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4050                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4051                 return rte_flow_error_set(error, EINVAL,
4052                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4053                                           "target group must be other than"
4054                                           " the current flow group");
4055         return 0;
4056 }
4057
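/*
 * Usage sketch (hypothetical, application side): a jump action that the
 * validator above accepts carries a configuration and targets a group
 * other than the rule's own:
 */
static const struct rte_flow_action_jump jump_conf_sketch = { .group = 2 };
static const struct rte_flow_action jump_actions_sketch[] __rte_unused = {
        { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump_conf_sketch },
        { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
};
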
4058 /**
4059  * Validate the port_id action.
4060  *
4061  * @param[in] dev
4062  *   Pointer to rte_eth_dev structure.
4063  * @param[in] action_flags
4064  *   Bit-fields that holds the actions detected until now.
4065  * @param[in] action
4066  *   Port_id RTE action structure.
4067  * @param[in] attr
4068  *   Attributes of flow that includes this action.
4069  * @param[out] error
4070  *   Pointer to error structure.
4071  *
4072  * @return
4073  *   0 on success, a negative errno value otherwise and rte_errno is set.
4074  */
4075 static int
4076 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4077                                 uint64_t action_flags,
4078                                 const struct rte_flow_action *action,
4079                                 const struct rte_flow_attr *attr,
4080                                 struct rte_flow_error *error)
4081 {
4082         const struct rte_flow_action_port_id *port_id;
4083         struct mlx5_priv *act_priv;
4084         struct mlx5_priv *dev_priv;
4085         uint16_t port;
4086
4087         if (!attr->transfer)
4088                 return rte_flow_error_set(error, ENOTSUP,
4089                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4090                                           NULL,
4091                                           "port id action is valid in transfer"
4092                                           " mode only");
4093         if (!action || !action->conf)
4094                 return rte_flow_error_set(error, ENOTSUP,
4095                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4096                                           NULL,
4097                                           "port id action parameters must be"
4098                                           " specified");
4099         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4100                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4101                 return rte_flow_error_set(error, EINVAL,
4102                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4103                                           "can have only one fate action in"
4104                                           " a flow");
4105         dev_priv = mlx5_dev_to_eswitch_info(dev);
4106         if (!dev_priv)
4107                 return rte_flow_error_set(error, rte_errno,
4108                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4109                                           NULL,
4110                                           "failed to obtain E-Switch info");
4111         port_id = action->conf;
4112         port = port_id->original ? dev->data->port_id : port_id->id;
4113         act_priv = mlx5_port_to_eswitch_info(port, false);
4114         if (!act_priv)
4115                 return rte_flow_error_set
4116                                 (error, rte_errno,
4117                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4118                                  "failed to obtain E-Switch port id for port");
4119         if (act_priv->domain_id != dev_priv->domain_id)
4120                 return rte_flow_error_set
4121                                 (error, EINVAL,
4122                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4123                                  "port does not belong to"
4124                                  " E-Switch being configured");
4125         return 0;
4126 }
4127
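/*
 * Usage sketch (hypothetical, application side): port_id is a fate action
 * and passes the check above only on a transfer rule, e.g.:
 */
static const struct rte_flow_attr transfer_attr_sketch __rte_unused = {
        .group = 0,
        .transfer = 1,
};
static const struct rte_flow_action_port_id port_id_conf_sketch __rte_unused = {
        .original = 0,
        .id = 1, /* DPDK port id of the destination representor */
};
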
4128 /**
4129  * Get the maximum number of modify header actions.
4130  *
4131  * @param dev
4132  *   Pointer to rte_eth_dev structure.
4133  * @param flags
4134  *   Flags bits to check if root level.
4135  *
4136  * @return
4137  *   Max number of modify header actions device can support.
4138  */
4139 static inline unsigned int
4140 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4141                               uint64_t flags)
4142 {
4143         /*
4144          * There is no way to query the max capacity from FW directly.
4145          * On the root table only the smaller defined maximum is assumed supported.
4146          */
4147         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4148                 return MLX5_MAX_MODIFY_NUM;
4149         else
4150                 return MLX5_ROOT_TBL_MODIFY_NUM;
4151 }
4152
4153 /**
4154  * Validate the meter action.
4155  *
4156  * @param[in] dev
4157  *   Pointer to rte_eth_dev structure.
4158  * @param[in] action_flags
4159  *   Bit-fields that holds the actions detected until now.
4160  * @param[in] action
4161  *   Pointer to the meter action.
4162  * @param[in] attr
4163  *   Attributes of flow that includes this action.
4164  * @param[out] error
4165  *   Pointer to error structure.
4166  *
4167  * @return
4168  *   0 on success, a negative errno value otherwise and rte_errno is set.
4169  */
4170 static int
4171 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4172                                 uint64_t action_flags,
4173                                 const struct rte_flow_action *action,
4174                                 const struct rte_flow_attr *attr,
4175                                 struct rte_flow_error *error)
4176 {
4177         struct mlx5_priv *priv = dev->data->dev_private;
4178         const struct rte_flow_action_meter *am = action->conf;
4179         struct mlx5_flow_meter *fm;
4180
4181         if (!am)
4182                 return rte_flow_error_set(error, EINVAL,
4183                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4184                                           "meter action conf is NULL");
4185
4186         if (action_flags & MLX5_FLOW_ACTION_METER)
4187                 return rte_flow_error_set(error, ENOTSUP,
4188                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4189                                           "meter chaining not supported");
4190         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4191                 return rte_flow_error_set(error, ENOTSUP,
4192                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4193                                           "meter with jump not supported");
4194         if (!priv->mtr_en)
4195                 return rte_flow_error_set(error, ENOTSUP,
4196                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4197                                           NULL,
4198                                           "meter action not supported");
4199         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4200         if (!fm)
4201                 return rte_flow_error_set(error, EINVAL,
4202                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4203                                           "Meter not found");
4204         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4205               (!fm->ingress && !attr->ingress && attr->egress) ||
4206               (!fm->egress && !attr->egress && attr->ingress))))
4207                 return rte_flow_error_set(error, EINVAL,
4208                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4209                                           "Flow attributes are either invalid "
4210                                           "or have a conflict with current "
4211                                           "meter attributes");
4212         return 0;
4213 }
4214
4215 /**
4216  * Validate the age action.
4217  *
4218  * @param[in] action_flags
4219  *   Holds the actions detected until now.
4220  * @param[in] action
4221  *   Pointer to the age action.
4222  * @param[in] dev
4223  *   Pointer to the Ethernet device structure.
4224  * @param[out] error
4225  *   Pointer to error structure.
4226  *
4227  * @return
4228  *   0 on success, a negative errno value otherwise and rte_errno is set.
4229  */
4230 static int
4231 flow_dv_validate_action_age(uint64_t action_flags,
4232                             const struct rte_flow_action *action,
4233                             struct rte_eth_dev *dev,
4234                             struct rte_flow_error *error)
4235 {
4236         struct mlx5_priv *priv = dev->data->dev_private;
4237         const struct rte_flow_action_age *age = action->conf;
4238
4239         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4240             !priv->sh->aso_age_mng))
4241                 return rte_flow_error_set(error, ENOTSUP,
4242                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4243                                           NULL,
4244                                           "age action not supported");
4245         if (!(action->conf))
4246                 return rte_flow_error_set(error, EINVAL,
4247                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4248                                           "configuration cannot be NULL");
4249         if (!(age->timeout))
4250                 return rte_flow_error_set(error, EINVAL,
4251                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4252                                           "invalid timeout value 0");
4253         if (action_flags & MLX5_FLOW_ACTION_AGE)
4254                 return rte_flow_error_set(error, EINVAL,
4255                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4256                                           "duplicate age actions set");
4257         return 0;
4258 }
4259
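/*
 * Usage sketch (hypothetical, application side): an age action accepted
 * above needs a non-zero timeout and may appear only once per flow:
 */
static const struct rte_flow_action_age age_conf_sketch __rte_unused = {
        .timeout = 10, /* seconds, per the rte_flow API */
        .context = NULL, /* user context returned on aging events */
};
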
4260 /**
4261  * Validate the modify-header IPv4 DSCP actions.
4262  *
4263  * @param[in] action_flags
4264  *   Holds the actions detected until now.
4265  * @param[in] action
4266  *   Pointer to the modify action.
4267  * @param[in] item_flags
4268  *   Holds the items detected.
4269  * @param[out] error
4270  *   Pointer to error structure.
4271  *
4272  * @return
4273  *   0 on success, a negative errno value otherwise and rte_errno is set.
4274  */
4275 static int
4276 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4277                                          const struct rte_flow_action *action,
4278                                          const uint64_t item_flags,
4279                                          struct rte_flow_error *error)
4280 {
4281         int ret = 0;
4282
4283         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4284         if (!ret) {
4285                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4286                         return rte_flow_error_set(error, EINVAL,
4287                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4288                                                   NULL,
4289                                                   "no ipv4 item in pattern");
4290         }
4291         return ret;
4292 }
4293
4294 /**
4295  * Validate the modify-header IPv6 DSCP actions.
4296  *
4297  * @param[in] action_flags
4298  *   Holds the actions detected until now.
4299  * @param[in] action
4300  *   Pointer to the modify action.
4301  * @param[in] item_flags
4302  *   Holds the items detected.
4303  * @param[out] error
4304  *   Pointer to error structure.
4305  *
4306  * @return
4307  *   0 on success, a negative errno value otherwise and rte_errno is set.
4308  */
4309 static int
4310 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4311                                          const struct rte_flow_action *action,
4312                                          const uint64_t item_flags,
4313                                          struct rte_flow_error *error)
4314 {
4315         int ret = 0;
4316
4317         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4318         if (!ret) {
4319                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4320                         return rte_flow_error_set(error, EINVAL,
4321                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4322                                                   NULL,
4323                                                   "no ipv6 item in pattern");
4324         }
4325         return ret;
4326 }
4327
4328 /**
4329  * Match modify-header resource.
4330  *
4331  * @param list
4332  *   Pointer to the hash list.
4333  * @param entry
4334  *   Pointer to exist resource entry object.
4335  * @param key
4336  *   Key of the new entry.
4337  * @param ctx
4338  *   Pointer to new modify-header resource.
4339  *
4340  * @return
4341  *   0 on matching, non-zero otherwise.
4342  */
4343 int
4344 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4345                         struct mlx5_hlist_entry *entry,
4346                         uint64_t key __rte_unused, void *cb_ctx)
4347 {
4348         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4349         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4350         struct mlx5_flow_dv_modify_hdr_resource *resource =
4351                         container_of(entry, typeof(*resource), entry);
4352         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4353
4354         key_len += ref->actions_num * sizeof(ref->actions[0]);
4355         return ref->actions_num != resource->actions_num ||
4356                memcmp(&ref->ft_type, &resource->ft_type, key_len);
4357 }
4358
4359 struct mlx5_hlist_entry *
4360 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4361                          void *cb_ctx)
4362 {
4363         struct mlx5_dev_ctx_shared *sh = list->ctx;
4364         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4365         struct mlx5dv_dr_domain *ns;
4366         struct mlx5_flow_dv_modify_hdr_resource *entry;
4367         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4368         int ret;
4369         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4370         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4371
4372         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4373                             SOCKET_ID_ANY);
4374         if (!entry) {
4375                 rte_flow_error_set(ctx->error, ENOMEM,
4376                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4377                                    "cannot allocate resource memory");
4378                 return NULL;
4379         }
4380         rte_memcpy(&entry->ft_type,
4381                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4382                    key_len + data_len);
4383         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4384                 ns = sh->fdb_domain;
4385         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4386                 ns = sh->tx_domain;
4387         else
4388                 ns = sh->rx_domain;
4389         ret = mlx5_flow_os_create_flow_action_modify_header
4390                                         (sh->ctx, ns, entry,
4391                                          data_len, &entry->action);
4392         if (ret) {
4393                 mlx5_free(entry);
4394                 rte_flow_error_set(ctx->error, ENOMEM,
4395                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4396                                    NULL, "cannot create modification action");
4397                 return NULL;
4398         }
4399         return &entry->entry;
4400 }
4401
4402 /**
4403  * Validate the sample action.
4404  *
4405  * @param[in, out] action_flags
4406  *   Holds the actions detected until now.
4407  * @param[in] action
4408  *   Pointer to the sample action.
4409  * @param[in] dev
4410  *   Pointer to the Ethernet device structure.
4411  * @param[in] attr
4412  *   Attributes of flow that includes this action.
4413  * @param[in] item_flags
4414  *   Holds the items detected.
4415  * @param[in] rss
4416  *   Pointer to the RSS action.
4417  * @param[out] sample_rss
4418  *   Pointer to the RSS action in sample action list.
4419  * @param[out] error
4420  *   Pointer to error structure.
4421  *
4422  * @return
4423  *   0 on success, a negative errno value otherwise and rte_errno is set.
4424  */
4425 static int
4426 flow_dv_validate_action_sample(uint64_t *action_flags,
4427                                const struct rte_flow_action *action,
4428                                struct rte_eth_dev *dev,
4429                                const struct rte_flow_attr *attr,
4430                                uint64_t item_flags,
4431                                const struct rte_flow_action_rss *rss,
4432                                const struct rte_flow_action_rss **sample_rss,
4433                                struct rte_flow_error *error)
4434 {
4435         struct mlx5_priv *priv = dev->data->dev_private;
4436         struct mlx5_dev_config *dev_conf = &priv->config;
4437         const struct rte_flow_action_sample *sample = action->conf;
4438         const struct rte_flow_action *act;
4439         uint64_t sub_action_flags = 0;
4440         uint16_t queue_index = 0xFFFF;
4441         int actions_n = 0;
4442         int ret;
4443
4444         if (!sample)
4445                 return rte_flow_error_set(error, EINVAL,
4446                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4447                                           "configuration cannot be NULL");
4448         if (sample->ratio == 0)
4449                 return rte_flow_error_set(error, EINVAL,
4450                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4451                                           "ratio value must be at least 1");
4452         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4453                 return rte_flow_error_set(error, ENOTSUP,
4454                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4455                                           NULL,
4456                                           "sample action not supported");
4457         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
4458                 return rte_flow_error_set(error, EINVAL,
4459                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4460                                           "Multiple sample actions not "
4461                                           "supported");
4462         if (*action_flags & MLX5_FLOW_ACTION_METER)
4463                 return rte_flow_error_set(error, EINVAL,
4464                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4465                                           "wrong action order, meter should "
4466                                           "be after sample action");
4467         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
4468                 return rte_flow_error_set(error, EINVAL,
4469                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4470                                           "wrong action order, jump should "
4471                                           "be after sample action");
4472         act = sample->actions;
4473         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4474                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4475                         return rte_flow_error_set(error, ENOTSUP,
4476                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4477                                                   act, "too many actions");
4478                 switch (act->type) {
4479                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4480                         ret = mlx5_flow_validate_action_queue(act,
4481                                                               sub_action_flags,
4482                                                               dev,
4483                                                               attr, error);
4484                         if (ret < 0)
4485                                 return ret;
4486                         queue_index = ((const struct rte_flow_action_queue *)
4487                                                         (act->conf))->index;
4488                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4489                         ++actions_n;
4490                         break;
4491                 case RTE_FLOW_ACTION_TYPE_RSS:
4492                         *sample_rss = act->conf;
4493                         ret = mlx5_flow_validate_action_rss(act,
4494                                                             sub_action_flags,
4495                                                             dev, attr,
4496                                                             item_flags,
4497                                                             error);
4498                         if (ret < 0)
4499                                 return ret;
4500                         if (rss && *sample_rss &&
4501                             ((*sample_rss)->level != rss->level ||
4502                             (*sample_rss)->types != rss->types))
4503                                 return rte_flow_error_set(error, ENOTSUP,
4504                                         RTE_FLOW_ERROR_TYPE_ACTION,
4505                                         NULL,
4506                                         "Can't use different RSS types "
4507                                         "or levels in the same flow");
4508                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
4509                                 queue_index = (*sample_rss)->queue[0];
4510                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
4511                         ++actions_n;
4512                         break;
4513                 case RTE_FLOW_ACTION_TYPE_MARK:
4514                         ret = flow_dv_validate_action_mark(dev, act,
4515                                                            sub_action_flags,
4516                                                            attr, error);
4517                         if (ret < 0)
4518                                 return ret;
4519                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4520                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4521                                                 MLX5_FLOW_ACTION_MARK_EXT;
4522                         else
4523                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4524                         ++actions_n;
4525                         break;
4526                 case RTE_FLOW_ACTION_TYPE_COUNT:
4527                         if (*action_flags & MLX5_FLOW_ACTION_COUNT)
4528                                 return rte_flow_error_set(error, EINVAL,
4529                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4530                                                 action,
4531                                                 "duplicate count action set");
4532                         ret = flow_dv_validate_action_count(dev,
4533                                                             sub_action_flags,
4534                                                             error);
4535                         if (ret < 0)
4536                                 return ret;
4537                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4538                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
4539                         ++actions_n;
4540                         break;
4541                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4542                         ret = flow_dv_validate_action_port_id(dev,
4543                                                               sub_action_flags,
4544                                                               act,
4545                                                               attr,
4546                                                               error);
4547                         if (ret)
4548                                 return ret;
4549                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4550                         ++actions_n;
4551                         break;
4552                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4553                         ret = flow_dv_validate_action_raw_encap_decap
4554                                 (dev, NULL, act->conf, attr, &sub_action_flags,
4555                                  &actions_n, action, item_flags, error);
4556                         if (ret < 0)
4557                                 return ret;
4558                         ++actions_n;
4559                         break;
4560                 default:
4561                         return rte_flow_error_set(error, ENOTSUP,
4562                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4563                                                   NULL,
4564                                                   "unsupported action in the "
4565                                                   "sample action list");
4566                 }
4567         }
4568         if (attr->ingress && !attr->transfer) {
4569                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
4570                                           MLX5_FLOW_ACTION_RSS)))
4571                         return rte_flow_error_set(error, EINVAL,
4572                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4573                                                   NULL,
4574                                                   "Ingress must have a dest "
4575                                                   "QUEUE for Sample");
4576         } else if (attr->egress && !attr->transfer) {
4577                 return rte_flow_error_set(error, ENOTSUP,
4578                                           RTE_FLOW_ERROR_TYPE_ACTION,
4579                                           NULL,
4580                                           "Sample only supports Ingress "
4581                                           "or E-Switch");
4582         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4583                 MLX5_ASSERT(attr->transfer);
4584                 if (sample->ratio > 1)
4585                         return rte_flow_error_set(error, ENOTSUP,
4586                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4587                                                   NULL,
4588                                                   "E-Switch doesn't support "
4589                                                   "any optional action "
4590                                                   "for sampling");
4591                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4592                         return rte_flow_error_set(error, ENOTSUP,
4593                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4594                                                   NULL,
4595                                                   "unsupported action QUEUE");
4596                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
4597                         return rte_flow_error_set(error, ENOTSUP,
4598                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4599                                                   NULL,
4600                                                   "unsupported action RSS");
4601                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4602                         return rte_flow_error_set(error, EINVAL,
4603                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4604                                                   NULL,
4605                                                   "E-Switch must have a dest "
4606                                                   "port for mirroring");
4607         }
4608         /* Continue validation for Xcap actions. */
4609         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4610             (queue_index == 0xFFFF ||
4611              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4612                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4613                      MLX5_FLOW_XCAP_ACTIONS)
4614                         return rte_flow_error_set(error, ENOTSUP,
4615                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4616                                                   NULL, "encap and decap "
4617                                                   "combination isn't "
4618                                                   "supported");
4619                 if (!attr->transfer && attr->ingress && (sub_action_flags &
4620                                                         MLX5_FLOW_ACTION_ENCAP))
4621                         return rte_flow_error_set(error, ENOTSUP,
4622                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4623                                                   NULL, "encap is not supported"
4624                                                   " for ingress traffic");
4625         }
4626         return 0;
4627 }
4628
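/*
 * Usage sketch (hypothetical, application side): a NIC ingress sample
 * action that satisfies the validator above -- non-zero ratio and a
 * QUEUE fate in the sub-action list:
 */
static const struct rte_flow_action_queue sample_queue_sketch = { .index = 0 };
static const struct rte_flow_action sample_sub_actions_sketch[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &sample_queue_sketch },
        { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
};
static const struct rte_flow_action_sample sample_conf_sketch __rte_unused = {
        .ratio = 2, /* sample 1 of every 2 packets */
        .actions = sample_sub_actions_sketch,
};
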
4629 /**
4630  * Find existing modify-header resource or create and register a new one.
4631  *
4632  * @param[in, out] dev
4633  *   Pointer to rte_eth_dev structure.
4634  * @param[in, out] resource
4635  *   Pointer to modify-header resource.
4636  * @param[in, out] dev_flow
4637  *   Pointer to the dev_flow.
4638  * @param[out] error
4639  *   Pointer to error structure.
4640  *
4641  * @return
4642  *   0 on success, otherwise a negative errno value and rte_errno is set.
4643  */
4644 static int
4645 flow_dv_modify_hdr_resource_register
4646                         (struct rte_eth_dev *dev,
4647                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4648                          struct mlx5_flow *dev_flow,
4649                          struct rte_flow_error *error)
4650 {
4651         struct mlx5_priv *priv = dev->data->dev_private;
4652         struct mlx5_dev_ctx_shared *sh = priv->sh;
4653         uint32_t key_len = sizeof(*resource) -
4654                            offsetof(typeof(*resource), ft_type) +
4655                            resource->actions_num * sizeof(resource->actions[0]);
4656         struct mlx5_hlist_entry *entry;
4657         struct mlx5_flow_cb_ctx ctx = {
4658                 .error = error,
4659                 .data = resource,
4660         };
4661         uint64_t key64;
4662
4663         resource->flags = dev_flow->dv.group ? 0 :
4664                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4665         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4666                                     resource->flags))
4667                 return rte_flow_error_set(error, EOVERFLOW,
4668                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4669                                           "too many modify header items");
4670         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4671         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
4672         if (!entry)
4673                 return -rte_errno;
4674         resource = container_of(entry, typeof(*resource), entry);
4675         dev_flow->handle->dvh.modify_hdr = resource;
4676         return 0;
4677 }
4678
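/*
 * Illustrative sketch (not part of the driver): the cache key used above
 * is a raw checksum over the lookup-relevant tail of the resource, from
 * ft_type through the variable-length action array. A standalone
 * rendering of the same idea with a hypothetical struct:
 */
struct mod_hdr_key_sketch {
        uint32_t refcnt;    /* not part of the key */
        uint8_t ft_type;    /* key region starts here */
        uint8_t actions_num;
        uint64_t actions[]; /* actions_num entries, part of the key */
};

static uint64_t __rte_unused
mod_hdr_key_hash_sketch(const struct mod_hdr_key_sketch *r)
{
        /* Fixed part from ft_type to end of struct, plus the actions. */
        uint32_t key_len = sizeof(*r) -
                           offsetof(struct mod_hdr_key_sketch, ft_type) +
                           r->actions_num * sizeof(r->actions[0]);

        /* __rte_raw_cksum() comes from <rte_ip.h>, already included. */
        return __rte_raw_cksum(&r->ft_type, key_len, 0);
}
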
4679 /**
4680  * Get DV flow counter by index.
4681  *
4682  * @param[in] dev
4683  *   Pointer to the Ethernet device structure.
4684  * @param[in] idx
4685  *   mlx5 flow counter index in the container.
4686  * @param[out] ppool
4687  *   mlx5 flow counter pool in the container.
4688  *
4689  * @return
4690  *   Pointer to the counter, NULL otherwise.
4691  */
4692 static struct mlx5_flow_counter *
4693 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4694                            uint32_t idx,
4695                            struct mlx5_flow_counter_pool **ppool)
4696 {
4697         struct mlx5_priv *priv = dev->data->dev_private;
4698         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4699         struct mlx5_flow_counter_pool *pool;
4700
4701         /* Decrease to original index and clear shared bit. */
4702         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4703         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4704         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4705         MLX5_ASSERT(pool);
4706         if (ppool)
4707                 *ppool = pool;
4708         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4709 }
4710
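/*
 * Worked example (constants assumed for illustration): counter indices
 * are 1-based and may carry a shared flag bit at MLX5_CNT_SHARED_OFFSET.
 * Assuming MLX5_COUNTERS_PER_POOL == 512, index 1027 decodes to pool 2,
 * offset 2 -- the inverse of MLX5_MAKE_CNT_IDX() used at allocation time:
 */
static inline void __rte_unused
cnt_idx_decode_sketch(uint32_t idx, uint32_t *pool_idx, uint32_t *offset)
{
        /* Drop the 1-based bias and mask out the shared-counter flag. */
        idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
        *pool_idx = idx / MLX5_COUNTERS_PER_POOL;
        *offset = idx % MLX5_COUNTERS_PER_POOL;
}
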
4711 /**
4712  * Check the devx counter belongs to the pool.
4713  *
4714  * @param[in] pool
4715  *   Pointer to the counter pool.
4716  * @param[in] id
4717  *   The counter devx ID.
4718  *
4719  * @return
4720  *   True if counter belongs to the pool, false otherwise.
4721  */
4722 static bool
4723 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4724 {
4725         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4726                    MLX5_COUNTERS_PER_POOL;
4727
4728         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4729                 return true;
4730         return false;
4731 }
4732
4733 /**
4734  * Get a pool by devx counter ID.
4735  *
4736  * @param[in] cmng
4737  *   Pointer to the counter management.
4738  * @param[in] id
4739  *   The counter devx ID.
4740  *
4741  * @return
4742  *   The counter pool pointer if it exists, NULL otherwise.
4743  */
4744 static struct mlx5_flow_counter_pool *
4745 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4746 {
4747         uint32_t i;
4748         struct mlx5_flow_counter_pool *pool = NULL;
4749
4750         rte_spinlock_lock(&cmng->pool_update_sl);
4751         /* Check last used pool. */
4752         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4753             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4754                 pool = cmng->pools[cmng->last_pool_idx];
4755                 goto out;
4756         }
4757         /* ID out of range means no suitable pool in the container. */
4758         if (id > cmng->max_id || id < cmng->min_id)
4759                 goto out;
4760         /*
4761          * Search the container from its end: counter IDs are mostly
4762          * allocated in increasing order, so the last pool is usually
4763          * the needed one.
4764          */
4765         i = cmng->n_valid;
4766         while (i--) {
4767                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4768
4769                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4770                         pool = pool_tmp;
4771                         break;
4772                 }
4773         }
4774 out:
4775         rte_spinlock_unlock(&cmng->pool_update_sl);
4776         return pool;
4777 }
4778
4779 /**
4780  * Resize a counter container.
4781  *
4782  * @param[in] dev
4783  *   Pointer to the Ethernet device structure.
4784  *
4785  * @return
4786  *   0 on success, otherwise negative errno value and rte_errno is set.
4787  */
4788 static int
4789 flow_dv_container_resize(struct rte_eth_dev *dev)
4790 {
4791         struct mlx5_priv *priv = dev->data->dev_private;
4792         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4793         void *old_pools = cmng->pools;
4794         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4795         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4796         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4797
4798         if (!pools) {
4799                 rte_errno = ENOMEM;
4800                 return -ENOMEM;
4801         }
4802         if (old_pools)
4803                 memcpy(pools, old_pools, cmng->n *
4804                                        sizeof(struct mlx5_flow_counter_pool *));
4805         cmng->n = resize;
4806         cmng->pools = pools;
4807         if (old_pools)
4808                 mlx5_free(old_pools);
4809         return 0;
4810 }
4811
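/*
 * Illustrative sketch (not part of the driver): the container above grows
 * by a fixed chunk and copies the old pool-pointer array over. The same
 * resize-by-copy pattern, standalone and with hypothetical names:
 */
static int __rte_unused
ptr_array_grow_sketch(void ***arr, uint32_t *cap, uint32_t chunk)
{
        uint32_t new_cap = *cap + chunk;
        void **p = mlx5_malloc(MLX5_MEM_ZERO, new_cap * sizeof(void *), 0,
                               SOCKET_ID_ANY);

        if (!p)
                return -ENOMEM;
        if (*arr) {
                memcpy(p, *arr, *cap * sizeof(void *));
                mlx5_free(*arr);
        }
        *arr = p;
        *cap = new_cap;
        return 0;
}
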
4812 /**
4813  * Query a devx flow counter.
4814  *
4815  * @param[in] dev
4816  *   Pointer to the Ethernet device structure.
4817  * @param[in] counter
4818  *   Index to the flow counter.
4819  * @param[out] pkts
4820  *   The statistics value of packets.
4821  * @param[out] bytes
4822  *   The statistics value of bytes.
4823  *
4824  * @return
4825  *   0 on success, otherwise a negative errno value and rte_errno is set.
4826  */
4827 static inline int
4828 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4829                      uint64_t *bytes)
4830 {
4831         struct mlx5_priv *priv = dev->data->dev_private;
4832         struct mlx5_flow_counter_pool *pool = NULL;
4833         struct mlx5_flow_counter *cnt;
4834         int offset;
4835
4836         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4837         MLX5_ASSERT(pool);
4838         if (priv->sh->cmng.counter_fallback)
4839                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4840                                         0, pkts, bytes, 0, NULL, NULL, 0);
4841         rte_spinlock_lock(&pool->sl);
4842         if (!pool->raw) {
4843                 *pkts = 0;
4844                 *bytes = 0;
4845         } else {
4846                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4847                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4848                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4849         }
4850         rte_spinlock_unlock(&pool->sl);
4851         return 0;
4852 }
4853
4854 /**
4855  * Create and initialize a new counter pool.
4856  *
4857  * @param[in] dev
4858  *   Pointer to the Ethernet device structure.
4859  * @param[out] dcs
4860  *   The devX counter handle.
4861  * @param[in] age
4862  *   Whether the pool is for counters that were allocated for aging.
4865  *
4866  * @return
4867  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4868  */
4869 static struct mlx5_flow_counter_pool *
4870 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4871                     uint32_t age)
4872 {
4873         struct mlx5_priv *priv = dev->data->dev_private;
4874         struct mlx5_flow_counter_pool *pool;
4875         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4876         bool fallback = priv->sh->cmng.counter_fallback;
4877         uint32_t size = sizeof(*pool);
4878
4879         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
4880         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4881         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4882         if (!pool) {
4883                 rte_errno = ENOMEM;
4884                 return NULL;
4885         }
4886         pool->raw = NULL;
4887         pool->is_aged = !!age;
4888         pool->query_gen = 0;
4889         pool->min_dcs = dcs;
4890         rte_spinlock_init(&pool->sl);
4891         rte_spinlock_init(&pool->csl);
4892         TAILQ_INIT(&pool->counters[0]);
4893         TAILQ_INIT(&pool->counters[1]);
4894         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4895         rte_spinlock_lock(&cmng->pool_update_sl);
4896         pool->index = cmng->n_valid;
4897         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4898                 mlx5_free(pool);
4899                 rte_spinlock_unlock(&cmng->pool_update_sl);
4900                 return NULL;
4901         }
4902         cmng->pools[pool->index] = pool;
4903         cmng->n_valid++;
4904         if (unlikely(fallback)) {
4905                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4906
4907                 if (base < cmng->min_id)
4908                         cmng->min_id = base;
4909                 if (base > cmng->max_id)
4910                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4911                 cmng->last_pool_idx = pool->index;
4912         }
4913         rte_spinlock_unlock(&cmng->pool_update_sl);
4914         return pool;
4915 }
4916
4917 /**
4918  * Prepare a new counter and/or a new counter pool.
4919  *
4920  * @param[in] dev
4921  *   Pointer to the Ethernet device structure.
4922  * @param[out] cnt_free
4923  *   Where to store the pointer to the newly allocated counter.
4924  * @param[in] age
4925  *   Whether the pool is for counters that were allocated for aging.
4926  *
4927  * @return
4928  *   The counter pool pointer and @p cnt_free is set on success,
4929  *   NULL otherwise and rte_errno is set.
4930  */
4931 static struct mlx5_flow_counter_pool *
4932 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4933                              struct mlx5_flow_counter **cnt_free,
4934                              uint32_t age)
4935 {
4936         struct mlx5_priv *priv = dev->data->dev_private;
4937         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4938         struct mlx5_flow_counter_pool *pool;
4939         struct mlx5_counters tmp_tq;
4940         struct mlx5_devx_obj *dcs = NULL;
4941         struct mlx5_flow_counter *cnt;
4942         enum mlx5_counter_type cnt_type =
4943                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4944         bool fallback = priv->sh->cmng.counter_fallback;
4945         uint32_t i;
4946
4947         if (fallback) {
4948                 /* bulk_bitmap must be 0 for single counter allocation. */
4949                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4950                 if (!dcs)
4951                         return NULL;
4952                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4953                 if (!pool) {
4954                         pool = flow_dv_pool_create(dev, dcs, age);
4955                         if (!pool) {
4956                                 mlx5_devx_cmd_destroy(dcs);
4957                                 return NULL;
4958                         }
4959                 }
4960                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4961                 cnt = MLX5_POOL_GET_CNT(pool, i);
4962                 cnt->pool = pool;
4963                 cnt->dcs_when_free = dcs;
4964                 *cnt_free = cnt;
4965                 return pool;
4966         }
4967         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4968         if (!dcs) {
4969                 rte_errno = ENODATA;
4970                 return NULL;
4971         }
4972         pool = flow_dv_pool_create(dev, dcs, age);
4973         if (!pool) {
4974                 mlx5_devx_cmd_destroy(dcs);
4975                 return NULL;
4976         }
4977         TAILQ_INIT(&tmp_tq);
4978         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4979                 cnt = MLX5_POOL_GET_CNT(pool, i);
4980                 cnt->pool = pool;
4981                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4982         }
4983         rte_spinlock_lock(&cmng->csl[cnt_type]);
4984         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4985         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4986         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4987         (*cnt_free)->pool = pool;
4988         return pool;
4989 }
4990
4991 /**
4992  * Allocate a flow counter.
4993  *
4994  * @param[in] dev
4995  *   Pointer to the Ethernet device structure.
4996  * @param[in] age
4997  *   Whether the counter was allocated for aging.
4998  *
4999  * @return
5000  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5001  */
5002 static uint32_t
5003 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5004 {
5005         struct mlx5_priv *priv = dev->data->dev_private;
5006         struct mlx5_flow_counter_pool *pool = NULL;
5007         struct mlx5_flow_counter *cnt_free = NULL;
5008         bool fallback = priv->sh->cmng.counter_fallback;
5009         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5010         enum mlx5_counter_type cnt_type =
5011                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5012         uint32_t cnt_idx;
5013
5014         if (!priv->config.devx) {
5015                 rte_errno = ENOTSUP;
5016                 return 0;
5017         }
5018         /* Get free counters from container. */
5019         rte_spinlock_lock(&cmng->csl[cnt_type]);
5020         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5021         if (cnt_free)
5022                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5023         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5024         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5025                 goto err;
5026         pool = cnt_free->pool;
5027         if (fallback)
5028                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
        /* Create a DV counter action only on first-time usage. */
5030         if (!cnt_free->action) {
5031                 uint16_t offset;
5032                 struct mlx5_devx_obj *dcs;
5033                 int ret;
5034
5035                 if (!fallback) {
5036                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5037                         dcs = pool->min_dcs;
5038                 } else {
5039                         offset = 0;
5040                         dcs = cnt_free->dcs_when_free;
5041                 }
5042                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5043                                                             &cnt_free->action);
5044                 if (ret) {
5045                         rte_errno = errno;
5046                         goto err;
5047                 }
5048         }
5049         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5050                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5051         /* Update the counter reset values. */
5052         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5053                                  &cnt_free->bytes))
5054                 goto err;
5055         if (!fallback && !priv->sh->cmng.query_thread_on)
5056                 /* Start the asynchronous batch query by the host thread. */
5057                 mlx5_set_query_alarm(priv->sh);
5058         return cnt_idx;
5059 err:
5060         if (cnt_free) {
5061                 cnt_free->pool = pool;
5062                 if (fallback)
5063                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5064                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5065                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5066                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5067         }
5068         return 0;
5069 }
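
/*
 * Hedged usage sketch for the allocator above (illustrative only):
 *
 *	uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);
 *
 *	if (!cnt_idx)
 *		return -rte_errno;	// e.g. ENOTSUP when DevX is off
 *	// ... attach the counter action to a flow, query it ...
 *	flow_dv_counter_free(dev, cnt_idx);	// defined below
 */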
5070
5071 /**
5072  * Allocate a shared flow counter.
5073  *
5074  * @param[in] ctx
5075  *   Pointer to the shared counter configuration.
5076  * @param[in] data
5077  *   Pointer to save the allocated counter index.
5078  *
 * @return
 *   0 on success, a negative value otherwise; the allocated counter
 *   index is stored in @p data and rte_errno is set on failure.
5081  */
5083 static int32_t
5084 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5085 {
5086         struct mlx5_shared_counter_conf *conf = ctx;
5087         struct rte_eth_dev *dev = conf->dev;
5088         struct mlx5_flow_counter *cnt;
5089
        data->dword = flow_dv_counter_alloc(dev, 0);
        if (!data->dword)
                /* Pass the failure up to the caller, rte_errno is set. */
                return -1;
        data->dword |= MLX5_CNT_SHARED_OFFSET;
5092         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5093         cnt->shared_info.id = conf->id;
5094         return 0;
5095 }
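
/*
 * Index layout sketch assumed by the callback above (illustrative):
 * MLX5_CNT_SHARED_OFFSET is a dedicated flag bit OR-ed into the counter
 * index, so IS_SHARED_CNT() can recognize shared counters at release time
 * without any lookup.
 *
 *	shared = plain_idx | MLX5_CNT_SHARED_OFFSET;
 *	if (IS_SHARED_CNT(shared))
 *		// clear the L3T entry before freeing the counter
 */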
5096
5097 /**
5098  * Get a shared flow counter.
5099  *
5100  * @param[in] dev
5101  *   Pointer to the Ethernet device structure.
5102  * @param[in] id
5103  *   Counter identifier.
5104  *
5105  * @return
5106  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5107  */
5108 static uint32_t
5109 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5110 {
5111         struct mlx5_priv *priv = dev->data->dev_private;
5112         struct mlx5_shared_counter_conf conf = {
5113                 .dev = dev,
5114                 .id = id,
5115         };
5116         union mlx5_l3t_data data = {
5117                 .dword = 0,
5118         };
5119
5120         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5121                                flow_dv_counter_alloc_shared_cb, &conf);
5122         return data.dword;
5123 }
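
/*
 * Behavior sketch (an assumption based on the L3T API usage above):
 * mlx5_l3t_prepare_entry() returns the existing entry for the given id when
 * present and only invokes the allocation callback otherwise, so two flows
 * sharing a counter ID resolve to the same index:
 *
 *	uint32_t a = flow_dv_counter_get_shared(dev, 42);
 *	uint32_t b = flow_dv_counter_get_shared(dev, 42);
 *	MLX5_ASSERT(a == b);	// same shared counter
 */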
5124
5125 /**
5126  * Get age param from counter index.
5127  *
5128  * @param[in] dev
5129  *   Pointer to the Ethernet device structure.
5130  * @param[in] counter
5131  *   Index to the counter handler.
5132  *
5133  * @return
5134  *   The aging parameter specified for the counter index.
5135  */
5136 static struct mlx5_age_param*
5137 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5138                                 uint32_t counter)
5139 {
5140         struct mlx5_flow_counter *cnt;
5141         struct mlx5_flow_counter_pool *pool = NULL;
5142
5143         flow_dv_counter_get_by_idx(dev, counter, &pool);
5144         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5145         cnt = MLX5_POOL_GET_CNT(pool, counter);
5146         return MLX5_CNT_TO_AGE(cnt);
5147 }
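
/*
 * Counter index arithmetic assumed above (hedged sketch): indices are
 * 1-based so that 0 can mean "no counter", which is why the decoding
 * subtracts 1 before splitting the index into pool and offset.
 *
 *	idx    = pool_index * MLX5_COUNTERS_PER_POOL + offset + 1;
 *	pool   = pool_by_index((idx - 1) / MLX5_COUNTERS_PER_POOL);
 *	offset = (idx - 1) % MLX5_COUNTERS_PER_POOL;
 */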
5148
5149 /**
5150  * Remove a flow counter from aged counter list.
5151  *
5152  * @param[in] dev
5153  *   Pointer to the Ethernet device structure.
5154  * @param[in] counter
5155  *   Index to the counter handler.
5156  * @param[in] cnt
5157  *   Pointer to the counter handler.
5158  */
5159 static void
5160 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5161                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5162 {
5163         struct mlx5_age_info *age_info;
5164         struct mlx5_age_param *age_param;
5165         struct mlx5_priv *priv = dev->data->dev_private;
5166         uint16_t expected = AGE_CANDIDATE;
5167
5168         age_info = GET_PORT_AGE_INFO(priv);
5169         age_param = flow_dv_counter_idx_get_age(dev, counter);
5170         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5171                                          AGE_FREE, false, __ATOMIC_RELAXED,
5172                                          __ATOMIC_RELAXED)) {
                /*
                 * We need the lock even if the age timed out, since the
                 * counter may still be in the aging process.
                 */
5177                 rte_spinlock_lock(&age_info->aged_sl);
5178                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5179                 rte_spinlock_unlock(&age_info->aged_sl);
5180                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5181         }
5182 }
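
/*
 * Age state transitions assumed above (illustrative): the CAS succeeds only
 * from AGE_CANDIDATE; any other state means the aging thread already moved
 * the counter to the aged list, which must then be unlinked under the lock.
 *
 *	AGE_CANDIDATE --CAS--> AGE_FREE                (fast path, lockless)
 *	AGE_TMOUT --lock--> unlink from aged list, then AGE_FREE
 */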
5183
5184 /**
5185  * Release a flow counter.
5186  *
5187  * @param[in] dev
5188  *   Pointer to the Ethernet device structure.
5189  * @param[in] counter
5190  *   Index to the counter handler.
5191  */
5192 static void
5193 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5194 {
5195         struct mlx5_priv *priv = dev->data->dev_private;
5196         struct mlx5_flow_counter_pool *pool = NULL;
5197         struct mlx5_flow_counter *cnt;
5198         enum mlx5_counter_type cnt_type;
5199
5200         if (!counter)
5201                 return;
5202         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5203         MLX5_ASSERT(pool);
5204         if (IS_SHARED_CNT(counter) &&
5205             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5206                 return;
5207         if (pool->is_aged)
5208                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5209         cnt->pool = pool;
        /*
         * Put the counter back to a list to be updated in non-fallback mode.
         * Two lists are used alternately: while one is being queried, freed
         * counters are added to the other one, selected by the pool query_gen
         * value. After the query finishes, the counters on that list are
         * moved to the global container counter list. The lists are swapped
         * when a query starts, so the query callback and this release
         * function always operate on different lists, and the per-pool lock
         * only has to serialize concurrent releases.
         */
5220         if (!priv->sh->cmng.counter_fallback) {
5221                 rte_spinlock_lock(&pool->csl);
5222                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5223                 rte_spinlock_unlock(&pool->csl);
5224         } else {
5225                 cnt->dcs_when_free = cnt->dcs_when_active;
5226                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5227                                            MLX5_COUNTER_TYPE_ORIGIN;
5228                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5229                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5230                                   cnt, next);
5231                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5232         }
5233 }
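
/*
 * Non-fallback release scheme restated as a sketch (hedged, the exact field
 * layout is an assumption):
 *
 *	gen = pool->query_gen;		// flips per batch query cycle
 *	TAILQ_INSERT_TAIL(&pool->counters[gen], cnt, next);
 *	// the query thread drains the other list into the global
 *	// cmng->counters[cnt_type] free list when its batch completes
 */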
5234
5235 /**
5236  * Verify the @p attributes will be correctly understood by the NIC and store
5237  * them in the @p flow if everything is correct.
5238  *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel offload descriptor, or NULL if none.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[in] grp_info
 *   Pointer to the flow group translation info.
 * @param[out] error
 *   Pointer to error structure.
5247  *
5248  * @return
 *   - 0 on success and non-root table.
5250  *   - 1 on success and root table.
5251  *   - a negative errno value otherwise and rte_errno is set.
5252  */
5253 static int
5254 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5255                             const struct mlx5_flow_tunnel *tunnel,
5256                             const struct rte_flow_attr *attributes,
5257                             const struct flow_grp_info *grp_info,
5258                             struct rte_flow_error *error)
5259 {
5260         struct mlx5_priv *priv = dev->data->dev_private;
5261         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
5262         int ret = 0;
5263
5264 #ifndef HAVE_MLX5DV_DR
5265         RTE_SET_USED(tunnel);
5266         RTE_SET_USED(grp_info);
5267         if (attributes->group)
5268                 return rte_flow_error_set(error, ENOTSUP,
5269                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5270                                           NULL,
5271                                           "groups are not supported");
5272 #else
5273         uint32_t table = 0;
5274
5275         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5276                                        grp_info, error);
5277         if (ret)
5278                 return ret;
5279         if (!table)
5280                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5281 #endif
5282         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
5283             attributes->priority > lowest_priority)
5284                 return rte_flow_error_set(error, ENOTSUP,
5285                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5286                                           NULL,
5287                                           "priority out of range");
5288         if (attributes->transfer) {
5289                 if (!priv->config.dv_esw_en)
5290                         return rte_flow_error_set
5291                                 (error, ENOTSUP,
5292                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5293                                  "E-Switch dr is not supported");
5294                 if (!(priv->representor || priv->master))
5295                         return rte_flow_error_set
5296                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5297                                  NULL, "E-Switch configuration can only be"
5298                                  " done by a master or a representor device");
5299                 if (attributes->egress)
5300                         return rte_flow_error_set
5301                                 (error, ENOTSUP,
5302                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5303                                  "egress is not supported");
5304         }
5305         if (!(attributes->egress ^ attributes->ingress))
5306                 return rte_flow_error_set(error, ENOTSUP,
5307                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5308                                           "must specify exactly one of "
5309                                           "ingress or egress");
5310         return ret;
5311 }
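
/*
 * Examples of the attribute rules enforced above (illustrative, assuming
 * HAVE_MLX5DV_DR, the default group-to-table mapping, and an enabled,
 * master-side E-Switch):
 *
 *	{ .ingress = 1, .group = 0 }	-> 1 (root table)
 *	{ .ingress = 1, .group = 3 }	-> 0 (non-root table)
 *	{ .ingress = 1, .egress = 1 }	-> -ENOTSUP (exactly one direction)
 *	{ .transfer = 1, .egress = 1 }	-> -ENOTSUP (no E-Switch egress)
 */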
5312
5313 /**
5314  * Internal validation function. For validating both actions and items.
5315  *
5316  * @param[in] dev
5317  *   Pointer to the rte_eth_dev structure.
5318  * @param[in] attr
5319  *   Pointer to the flow attributes.
5320  * @param[in] items
5321  *   Pointer to the list of items.
5322  * @param[in] actions
5323  *   Pointer to the list of actions.
5324  * @param[in] external
 *   Whether the flow rule is created by a request external to the PMD.
5326  * @param[in] hairpin
5327  *   Number of hairpin TX actions, 0 means classic flow.
5328  * @param[out] error
5329  *   Pointer to the error structure.
5330  *
5331  * @return
5332  *   0 on success, a negative errno value otherwise and rte_errno is set.
5333  */
5334 static int
5335 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5336                  const struct rte_flow_item items[],
5337                  const struct rte_flow_action actions[],
5338                  bool external, int hairpin, struct rte_flow_error *error)
5339 {
5340         int ret;
5341         uint64_t action_flags = 0;
5342         uint64_t item_flags = 0;
5343         uint64_t last_item = 0;
5344         uint8_t next_protocol = 0xff;
5345         uint16_t ether_type = 0;
5346         int actions_n = 0;
5347         uint8_t item_ipv6_proto = 0;
5348         const struct rte_flow_item *geneve_item = NULL;
5349         const struct rte_flow_item *gre_item = NULL;
5350         const struct rte_flow_item *gtp_item = NULL;
5351         const struct rte_flow_action_raw_decap *decap;
5352         const struct rte_flow_action_raw_encap *encap;
5353         const struct rte_flow_action_rss *rss = NULL;
5354         const struct rte_flow_action_rss *sample_rss = NULL;
5355         const struct rte_flow_item_tcp nic_tcp_mask = {
5356                 .hdr = {
5357                         .tcp_flags = 0xFF,
5358                         .src_port = RTE_BE16(UINT16_MAX),
5359                         .dst_port = RTE_BE16(UINT16_MAX),
5360                 }
5361         };
5362         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5363                 .hdr = {
5364                         .src_addr =
5365                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5366                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5367                         .dst_addr =
5368                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5369                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5370                         .vtc_flow = RTE_BE32(0xffffffff),
5371                         .proto = 0xff,
5372                         .hop_limits = 0xff,
5373                 },
5374                 .has_frag_ext = 1,
5375         };
5376         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5377                 .hdr = {
5378                         .common = {
5379                                 .u32 =
5380                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5381                                         .type = 0xFF,
5382                                         }).u32),
5383                         },
5384                         .dummy[0] = 0xffffffff,
5385                 },
5386         };
5387         struct mlx5_priv *priv = dev->data->dev_private;
5388         struct mlx5_dev_config *dev_conf = &priv->config;
5389         uint16_t queue_index = 0xFFFF;
5390         const struct rte_flow_item_vlan *vlan_m = NULL;
5391         int16_t rw_act_num = 0;
5392         uint64_t is_root;
5393         const struct mlx5_flow_tunnel *tunnel;
5394         struct flow_grp_info grp_info = {
5395                 .external = !!external,
5396                 .transfer = !!attr->transfer,
5397                 .fdb_def_rule = !!priv->fdb_def_rule,
5398         };
5399         const struct rte_eth_hairpin_conf *conf;
5400
5401         if (items == NULL)
5402                 return -1;
5403         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5404                 tunnel = flow_items_to_tunnel(items);
5405                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5406                                 MLX5_FLOW_ACTION_DECAP;
5407         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5408                 tunnel = flow_actions_to_tunnel(actions);
5409                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5410         } else {
5411                 tunnel = NULL;
5412         }
5413         if (tunnel && priv->representor)
5414                 return rte_flow_error_set(error, ENOTSUP,
5415                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5416                                           "decap not supported "
5417                                           "for VF representor");
5418         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5419                                 (dev, tunnel, attr, items, actions);
5420         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
5421         if (ret < 0)
5422                 return ret;
5423         is_root = (uint64_t)ret;
5424         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5425                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5426                 int type = items->type;
5427
5428                 if (!mlx5_flow_os_item_supported(type))
5429                         return rte_flow_error_set(error, ENOTSUP,
5430                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5431                                                   NULL, "item not supported");
5432                 switch (type) {
5433                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5434                         if (items[0].type != (typeof(items[0].type))
5435                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5436                                 return rte_flow_error_set
5437                                                 (error, EINVAL,
5438                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5439                                                 NULL, "MLX5 private items "
5440                                                 "must be the first");
5441                         break;
5442                 case RTE_FLOW_ITEM_TYPE_VOID:
5443                         break;
5444                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5445                         ret = flow_dv_validate_item_port_id
5446                                         (dev, items, attr, item_flags, error);
5447                         if (ret < 0)
5448                                 return ret;
5449                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5450                         break;
5451                 case RTE_FLOW_ITEM_TYPE_ETH:
5452                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5453                                                           true, error);
5454                         if (ret < 0)
5455                                 return ret;
5456                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5457                                              MLX5_FLOW_LAYER_OUTER_L2;
5458                         if (items->mask != NULL && items->spec != NULL) {
5459                                 ether_type =
5460                                         ((const struct rte_flow_item_eth *)
5461                                          items->spec)->type;
5462                                 ether_type &=
5463                                         ((const struct rte_flow_item_eth *)
5464                                          items->mask)->type;
5465                                 ether_type = rte_be_to_cpu_16(ether_type);
5466                         } else {
5467                                 ether_type = 0;
5468                         }
5469                         break;
5470                 case RTE_FLOW_ITEM_TYPE_VLAN:
5471                         ret = flow_dv_validate_item_vlan(items, item_flags,
5472                                                          dev, error);
5473                         if (ret < 0)
5474                                 return ret;
5475                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5476                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5477                         if (items->mask != NULL && items->spec != NULL) {
5478                                 ether_type =
5479                                         ((const struct rte_flow_item_vlan *)
5480                                          items->spec)->inner_type;
5481                                 ether_type &=
5482                                         ((const struct rte_flow_item_vlan *)
5483                                          items->mask)->inner_type;
5484                                 ether_type = rte_be_to_cpu_16(ether_type);
5485                         } else {
5486                                 ether_type = 0;
5487                         }
5488                         /* Store outer VLAN mask for of_push_vlan action. */
5489                         if (!tunnel)
5490                                 vlan_m = items->mask;
5491                         break;
5492                 case RTE_FLOW_ITEM_TYPE_IPV4:
5493                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5494                                                   &item_flags, &tunnel);
5495                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5496                                                          last_item, ether_type,
5497                                                          error);
5498                         if (ret < 0)
5499                                 return ret;
5500                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5501                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5502                         if (items->mask != NULL &&
5503                             ((const struct rte_flow_item_ipv4 *)
5504                              items->mask)->hdr.next_proto_id) {
5505                                 next_protocol =
5506                                         ((const struct rte_flow_item_ipv4 *)
5507                                          (items->spec))->hdr.next_proto_id;
5508                                 next_protocol &=
5509                                         ((const struct rte_flow_item_ipv4 *)
5510                                          (items->mask))->hdr.next_proto_id;
5511                         } else {
5512                                 /* Reset for inner layer. */
5513                                 next_protocol = 0xff;
5514                         }
5515                         break;
5516                 case RTE_FLOW_ITEM_TYPE_IPV6:
5517                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5518                                                   &item_flags, &tunnel);
5519                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5520                                                            last_item,
5521                                                            ether_type,
5522                                                            &nic_ipv6_mask,
5523                                                            error);
5524                         if (ret < 0)
5525                                 return ret;
5526                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5527                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5528                         if (items->mask != NULL &&
5529                             ((const struct rte_flow_item_ipv6 *)
5530                              items->mask)->hdr.proto) {
5531                                 item_ipv6_proto =
5532                                         ((const struct rte_flow_item_ipv6 *)
5533                                          items->spec)->hdr.proto;
5534                                 next_protocol =
5535                                         ((const struct rte_flow_item_ipv6 *)
5536                                          items->spec)->hdr.proto;
5537                                 next_protocol &=
5538                                         ((const struct rte_flow_item_ipv6 *)
5539                                          items->mask)->hdr.proto;
5540                         } else {
5541                                 /* Reset for inner layer. */
5542                                 next_protocol = 0xff;
5543                         }
5544                         break;
5545                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5546                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5547                                                                   item_flags,
5548                                                                   error);
5549                         if (ret < 0)
5550                                 return ret;
5551                         last_item = tunnel ?
5552                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5553                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5554                         if (items->mask != NULL &&
5555                             ((const struct rte_flow_item_ipv6_frag_ext *)
5556                              items->mask)->hdr.next_header) {
5557                                 next_protocol =
5558                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5559                                  items->spec)->hdr.next_header;
5560                                 next_protocol &=
5561                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5562                                  items->mask)->hdr.next_header;
5563                         } else {
5564                                 /* Reset for inner layer. */
5565                                 next_protocol = 0xff;
5566                         }
5567                         break;
5568                 case RTE_FLOW_ITEM_TYPE_TCP:
5569                         ret = mlx5_flow_validate_item_tcp
5570                                                 (items, item_flags,
5571                                                  next_protocol,
5572                                                  &nic_tcp_mask,
5573                                                  error);
5574                         if (ret < 0)
5575                                 return ret;
5576                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5577                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5578                         break;
5579                 case RTE_FLOW_ITEM_TYPE_UDP:
5580                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5581                                                           next_protocol,
5582                                                           error);
5583                         if (ret < 0)
5584                                 return ret;
5585                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5586                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5587                         break;
5588                 case RTE_FLOW_ITEM_TYPE_GRE:
5589                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5590                                                           next_protocol, error);
5591                         if (ret < 0)
5592                                 return ret;
5593                         gre_item = items;
5594                         last_item = MLX5_FLOW_LAYER_GRE;
5595                         break;
5596                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5597                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5598                                                             next_protocol,
5599                                                             error);
5600                         if (ret < 0)
5601                                 return ret;
5602                         last_item = MLX5_FLOW_LAYER_NVGRE;
5603                         break;
5604                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5605                         ret = mlx5_flow_validate_item_gre_key
5606                                 (items, item_flags, gre_item, error);
5607                         if (ret < 0)
5608                                 return ret;
5609                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5610                         break;
5611                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5612                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5613                                                             error);
5614                         if (ret < 0)
5615                                 return ret;
5616                         last_item = MLX5_FLOW_LAYER_VXLAN;
5617                         break;
5618                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5619                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5620                                                                 item_flags, dev,
5621                                                                 error);
5622                         if (ret < 0)
5623                                 return ret;
5624                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5625                         break;
5626                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5627                         ret = mlx5_flow_validate_item_geneve(items,
5628                                                              item_flags, dev,
5629                                                              error);
5630                         if (ret < 0)
5631                                 return ret;
5632                         geneve_item = items;
5633                         last_item = MLX5_FLOW_LAYER_GENEVE;
5634                         break;
5635                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
5636                         ret = mlx5_flow_validate_item_geneve_opt(items,
5637                                                                  last_item,
5638                                                                  geneve_item,
5639                                                                  dev,
5640                                                                  error);
5641                         if (ret < 0)
5642                                 return ret;
5643                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
5644                         break;
5645                 case RTE_FLOW_ITEM_TYPE_MPLS:
5646                         ret = mlx5_flow_validate_item_mpls(dev, items,
5647                                                            item_flags,
5648                                                            last_item, error);
5649                         if (ret < 0)
5650                                 return ret;
5651                         last_item = MLX5_FLOW_LAYER_MPLS;
5652                         break;
5654                 case RTE_FLOW_ITEM_TYPE_MARK:
5655                         ret = flow_dv_validate_item_mark(dev, items, attr,
5656                                                          error);
5657                         if (ret < 0)
5658                                 return ret;
5659                         last_item = MLX5_FLOW_ITEM_MARK;
5660                         break;
5661                 case RTE_FLOW_ITEM_TYPE_META:
5662                         ret = flow_dv_validate_item_meta(dev, items, attr,
5663                                                          error);
5664                         if (ret < 0)
5665                                 return ret;
5666                         last_item = MLX5_FLOW_ITEM_METADATA;
5667                         break;
5668                 case RTE_FLOW_ITEM_TYPE_ICMP:
5669                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5670                                                            next_protocol,
5671                                                            error);
5672                         if (ret < 0)
5673                                 return ret;
5674                         last_item = MLX5_FLOW_LAYER_ICMP;
5675                         break;
5676                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5677                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5678                                                             next_protocol,
5679                                                             error);
5680                         if (ret < 0)
5681                                 return ret;
5682                         item_ipv6_proto = IPPROTO_ICMPV6;
5683                         last_item = MLX5_FLOW_LAYER_ICMP6;
5684                         break;
5685                 case RTE_FLOW_ITEM_TYPE_TAG:
5686                         ret = flow_dv_validate_item_tag(dev, items,
5687                                                         attr, error);
5688                         if (ret < 0)
5689                                 return ret;
5690                         last_item = MLX5_FLOW_ITEM_TAG;
5691                         break;
5692                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5693                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5694                         break;
5695                 case RTE_FLOW_ITEM_TYPE_GTP:
5696                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5697                                                         error);
5698                         if (ret < 0)
5699                                 return ret;
5700                         gtp_item = items;
5701                         last_item = MLX5_FLOW_LAYER_GTP;
5702                         break;
5703                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
5704                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
5705                                                             gtp_item, attr,
5706                                                             error);
5707                         if (ret < 0)
5708                                 return ret;
5709                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
5710                         break;
5711                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5712                         /* Capacity will be checked in the translate stage. */
5713                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5714                                                             last_item,
5715                                                             ether_type,
5716                                                             &nic_ecpri_mask,
5717                                                             error);
5718                         if (ret < 0)
5719                                 return ret;
5720                         last_item = MLX5_FLOW_LAYER_ECPRI;
5721                         break;
5722                 default:
5723                         return rte_flow_error_set(error, ENOTSUP,
5724                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5725                                                   NULL, "item not supported");
5726                 }
5727                 item_flags |= last_item;
5728         }
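        /*
         * Layer accumulation sketch (illustrative): each matched item ORs
         * its layer bit into item_flags, with "tunnel" selecting the inner
         * variants, e.g. eth / ipv4 / udp / vxlan / eth accumulates:
         *
         *	OUTER_L2 | OUTER_L3_IPV4 | OUTER_L4_UDP | VXLAN | INNER_L2
         */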
5729         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5730                 int type = actions->type;
5731
5732                 if (!mlx5_flow_os_action_supported(type))
5733                         return rte_flow_error_set(error, ENOTSUP,
5734                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5735                                                   actions,
5736                                                   "action not supported");
5737                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5738                         return rte_flow_error_set(error, ENOTSUP,
5739                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5740                                                   actions, "too many actions");
5741                 switch (type) {
5742                 case RTE_FLOW_ACTION_TYPE_VOID:
5743                         break;
5744                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5745                         ret = flow_dv_validate_action_port_id(dev,
5746                                                               action_flags,
5747                                                               actions,
5748                                                               attr,
5749                                                               error);
5750                         if (ret)
5751                                 return ret;
5752                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5753                         ++actions_n;
5754                         break;
5755                 case RTE_FLOW_ACTION_TYPE_FLAG:
5756                         ret = flow_dv_validate_action_flag(dev, action_flags,
5757                                                            attr, error);
5758                         if (ret < 0)
5759                                 return ret;
5760                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5761                                 /* Count all modify-header actions as one. */
5762                                 if (!(action_flags &
5763                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5764                                         ++actions_n;
5765                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5766                                                 MLX5_FLOW_ACTION_MARK_EXT;
5767                         } else {
5768                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5769                                 ++actions_n;
5770                         }
5771                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5772                         break;
5773                 case RTE_FLOW_ACTION_TYPE_MARK:
5774                         ret = flow_dv_validate_action_mark(dev, actions,
5775                                                            action_flags,
5776                                                            attr, error);
5777                         if (ret < 0)
5778                                 return ret;
5779                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5780                                 /* Count all modify-header actions as one. */
5781                                 if (!(action_flags &
5782                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5783                                         ++actions_n;
5784                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5785                                                 MLX5_FLOW_ACTION_MARK_EXT;
5786                         } else {
5787                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5788                                 ++actions_n;
5789                         }
5790                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5791                         break;
5792                 case RTE_FLOW_ACTION_TYPE_SET_META:
5793                         ret = flow_dv_validate_action_set_meta(dev, actions,
5794                                                                action_flags,
5795                                                                attr, error);
5796                         if (ret < 0)
5797                                 return ret;
5798                         /* Count all modify-header actions as one action. */
5799                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5800                                 ++actions_n;
5801                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5802                         rw_act_num += MLX5_ACT_NUM_SET_META;
5803                         break;
5804                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5805                         ret = flow_dv_validate_action_set_tag(dev, actions,
5806                                                               action_flags,
5807                                                               attr, error);
5808                         if (ret < 0)
5809                                 return ret;
5810                         /* Count all modify-header actions as one action. */
5811                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5812                                 ++actions_n;
5813                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5814                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5815                         break;
5816                 case RTE_FLOW_ACTION_TYPE_DROP:
5817                         ret = mlx5_flow_validate_action_drop(action_flags,
5818                                                              attr, error);
5819                         if (ret < 0)
5820                                 return ret;
5821                         action_flags |= MLX5_FLOW_ACTION_DROP;
5822                         ++actions_n;
5823                         break;
5824                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5825                         ret = mlx5_flow_validate_action_queue(actions,
5826                                                               action_flags, dev,
5827                                                               attr, error);
5828                         if (ret < 0)
5829                                 return ret;
5830                         queue_index = ((const struct rte_flow_action_queue *)
5831                                                         (actions->conf))->index;
5832                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5833                         ++actions_n;
5834                         break;
5835                 case RTE_FLOW_ACTION_TYPE_RSS:
5836                         rss = actions->conf;
5837                         ret = mlx5_flow_validate_action_rss(actions,
5838                                                             action_flags, dev,
5839                                                             attr, item_flags,
5840                                                             error);
5841                         if (ret < 0)
5842                                 return ret;
5843                         if (rss && sample_rss &&
5844                             (sample_rss->level != rss->level ||
5845                             sample_rss->types != rss->types))
5846                                 return rte_flow_error_set(error, ENOTSUP,
5847                                         RTE_FLOW_ERROR_TYPE_ACTION,
5848                                         NULL,
                                        "Can't use different RSS types "
                                        "or levels in the same flow");
5851                         if (rss != NULL && rss->queue_num)
5852                                 queue_index = rss->queue[0];
5853                         action_flags |= MLX5_FLOW_ACTION_RSS;
5854                         ++actions_n;
5855                         break;
5856                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
                        ret = mlx5_flow_validate_action_default_miss
                                                (action_flags, attr, error);
5860                         if (ret < 0)
5861                                 return ret;
5862                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5863                         ++actions_n;
5864                         break;
5865                 case RTE_FLOW_ACTION_TYPE_COUNT:
5866                         ret = flow_dv_validate_action_count(dev, action_flags,
5867                                                             error);
5868                         if (ret < 0)
5869                                 return ret;
5870                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5871                         ++actions_n;
5872                         break;
5873                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5874                         if (flow_dv_validate_action_pop_vlan(dev,
5875                                                              action_flags,
5876                                                              actions,
5877                                                              item_flags, attr,
5878                                                              error))
5879                                 return -rte_errno;
5880                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5881                         ++actions_n;
5882                         break;
5883                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5884                         ret = flow_dv_validate_action_push_vlan(dev,
5885                                                                 action_flags,
5886                                                                 vlan_m,
5887                                                                 actions, attr,
5888                                                                 error);
5889                         if (ret < 0)
5890                                 return ret;
5891                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5892                         ++actions_n;
5893                         break;
5894                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5895                         ret = flow_dv_validate_action_set_vlan_pcp
5896                                                 (action_flags, actions, error);
5897                         if (ret < 0)
5898                                 return ret;
5899                         /* Count PCP with push_vlan command. */
5900                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5901                         break;
5902                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5903                         ret = flow_dv_validate_action_set_vlan_vid
5904                                                 (item_flags, action_flags,
5905                                                  actions, error);
5906                         if (ret < 0)
5907                                 return ret;
5908                         /* Count VID with push_vlan command. */
5909                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5910                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5911                         break;
5912                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5913                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5914                         ret = flow_dv_validate_action_l2_encap(dev,
5915                                                                action_flags,
5916                                                                actions, attr,
5917                                                                error);
5918                         if (ret < 0)
5919                                 return ret;
5920                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5921                         ++actions_n;
5922                         break;
5923                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5924                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5925                         ret = flow_dv_validate_action_decap(dev, action_flags,
5926                                                             actions, item_flags,
5927                                                             attr, error);
5928                         if (ret < 0)
5929                                 return ret;
5930                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5931                         ++actions_n;
5932                         break;
5933                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5934                         ret = flow_dv_validate_action_raw_encap_decap
5935                                 (dev, NULL, actions->conf, attr, &action_flags,
5936                                  &actions_n, actions, item_flags, error);
5937                         if (ret < 0)
5938                                 return ret;
5939                         break;
5940                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5941                         decap = actions->conf;
5942                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5943                                 ;
5944                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5945                                 encap = NULL;
5946                                 actions--;
5947                         } else {
5948                                 encap = actions->conf;
5949                         }
5950                         ret = flow_dv_validate_action_raw_encap_decap
5951                                            (dev,
5952                                             decap ? decap : &empty_decap, encap,
5953                                             attr, &action_flags, &actions_n,
5954                                             actions, item_flags, error);
5955                         if (ret < 0)
5956                                 return ret;
5957                         break;
5958                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5959                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5960                         ret = flow_dv_validate_action_modify_mac(action_flags,
5961                                                                  actions,
5962                                                                  item_flags,
5963                                                                  error);
5964                         if (ret < 0)
5965                                 return ret;
5966                         /* Count all modify-header actions as one action. */
5967                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5968                                 ++actions_n;
5969                         action_flags |= actions->type ==
5970                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5971                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5972                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
                        /*
                         * Even though the source and destination MAC addresses
                         * overlap in 4-byte aligned words of the header, the
                         * convert function handles them separately, so 4 SW
                         * actions are created in total: 2 actions are added
                         * per address no matter how many of its bytes are set.
                         */
5980                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5981                         break;
5982                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5983                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5984                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5985                                                                   actions,
5986                                                                   item_flags,
5987                                                                   error);
5988                         if (ret < 0)
5989                                 return ret;
5990                         /* Count all modify-header actions as one action. */
5991                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5992                                 ++actions_n;
5993                         action_flags |= actions->type ==
5994                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5995                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5996                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5997                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5998                         break;
5999                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
6000                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
6001                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
6002                                                                   actions,
6003                                                                   item_flags,
6004                                                                   error);
6005                         if (ret < 0)
6006                                 return ret;
6007                         if (item_ipv6_proto == IPPROTO_ICMPV6)
6008                                 return rte_flow_error_set(error, ENOTSUP,
6009                                         RTE_FLOW_ERROR_TYPE_ACTION,
6010                                         actions,
                                        "Can't change the header "
                                        "when ICMPv6 proto is matched");
6013                         /* Count all modify-header actions as one action. */
6014                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6015                                 ++actions_n;
6016                         action_flags |= actions->type ==
6017                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
6018                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
6019                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
6020                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
6021                         break;
6022                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
6023                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
6024                         ret = flow_dv_validate_action_modify_tp(action_flags,
6025                                                                 actions,
6026                                                                 item_flags,
6027                                                                 error);
6028                         if (ret < 0)
6029                                 return ret;
6030                         /* Count all modify-header actions as one action. */
6031                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6032                                 ++actions_n;
6033                         action_flags |= actions->type ==
6034                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
6035                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
6036                                                 MLX5_FLOW_ACTION_SET_TP_DST;
6037                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
6038                         break;
6039                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
6040                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
6041                         ret = flow_dv_validate_action_modify_ttl(action_flags,
6042                                                                  actions,
6043                                                                  item_flags,
6044                                                                  error);
6045                         if (ret < 0)
6046                                 return ret;
6047                         /* Count all modify-header actions as one action. */
6048                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6049                                 ++actions_n;
6050                         action_flags |= actions->type ==
6051                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
6052                                                 MLX5_FLOW_ACTION_SET_TTL :
6053                                                 MLX5_FLOW_ACTION_DEC_TTL;
6054                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
6055                         break;
6056                 case RTE_FLOW_ACTION_TYPE_JUMP:
6057                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
6058                                                            action_flags,
6059                                                            attr, external,
6060                                                            error);
6061                         if (ret)
6062                                 return ret;
6063                         ++actions_n;
6064                         action_flags |= MLX5_FLOW_ACTION_JUMP;
6065                         break;
6066                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
6067                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
6068                         ret = flow_dv_validate_action_modify_tcp_seq
6069                                                                 (action_flags,
6070                                                                  actions,
6071                                                                  item_flags,
6072                                                                  error);
6073                         if (ret < 0)
6074                                 return ret;
6075                         /* Count all modify-header actions as one action. */
6076                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6077                                 ++actions_n;
6078                         action_flags |= actions->type ==
6079                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
6080                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
6081                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
6082                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
6083                         break;
6084                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
6085                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
6086                         ret = flow_dv_validate_action_modify_tcp_ack
6087                                                                 (action_flags,
6088                                                                  actions,
6089                                                                  item_flags,
6090                                                                  error);
6091                         if (ret < 0)
6092                                 return ret;
6093                         /* Count all modify-header actions as one action. */
6094                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6095                                 ++actions_n;
6096                         action_flags |= actions->type ==
6097                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
6098                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
6099                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
6100                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
6101                         break;
6102                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
6103                         break;
6104                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
6105                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
6106                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6107                         break;
6108                 case RTE_FLOW_ACTION_TYPE_METER:
6109                         ret = mlx5_flow_validate_action_meter(dev,
6110                                                               action_flags,
6111                                                               actions, attr,
6112                                                               error);
6113                         if (ret < 0)
6114                                 return ret;
6115                         action_flags |= MLX5_FLOW_ACTION_METER;
6116                         ++actions_n;
6117                         /* Meter action will add one more TAG action. */
6118                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6119                         break;
6120                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
6121                         if (!attr->transfer && !attr->group)
6122                                 return rte_flow_error_set(error, ENOTSUP,
6123                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6124                                                                            NULL,
6125                           "Shared ASO age action is not supported for group 0");
6126                         action_flags |= MLX5_FLOW_ACTION_AGE;
6127                         ++actions_n;
6128                         break;
6129                 case RTE_FLOW_ACTION_TYPE_AGE:
6130                         ret = flow_dv_validate_action_age(action_flags,
6131                                                           actions, dev,
6132                                                           error);
6133                         if (ret < 0)
6134                                 return ret;
6135                         action_flags |= MLX5_FLOW_ACTION_AGE;
6136                         ++actions_n;
6137                         break;
6138                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
6139                         ret = flow_dv_validate_action_modify_ipv4_dscp
6140                                                          (action_flags,
6141                                                           actions,
6142                                                           item_flags,
6143                                                           error);
6144                         if (ret < 0)
6145                                 return ret;
6146                         /* Count all modify-header actions as one action. */
6147                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6148                                 ++actions_n;
6149                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
6150                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6151                         break;
6152                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
6153                         ret = flow_dv_validate_action_modify_ipv6_dscp
6154                                                                 (action_flags,
6155                                                                  actions,
6156                                                                  item_flags,
6157                                                                  error);
6158                         if (ret < 0)
6159                                 return ret;
6160                         /* Count all modify-header actions as one action. */
6161                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6162                                 ++actions_n;
6163                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6164                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6165                         break;
6166                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6167                         ret = flow_dv_validate_action_sample(&action_flags,
6168                                                              actions, dev,
6169                                                              attr, item_flags,
6170                                                              rss, &sample_rss,
6171                                                              error);
6172                         if (ret < 0)
6173                                 return ret;
6174                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6175                         ++actions_n;
6176                         break;
6177                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6178                         if (actions[0].type != (typeof(actions[0].type))
6179                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6180                                 return rte_flow_error_set
6181                                                 (error, EINVAL,
6182                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6183                                                 NULL, "MLX5 private action "
6184                                                 "must be the first");
6185
6186                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6187                         break;
6188                 default:
6189                         return rte_flow_error_set(error, ENOTSUP,
6190                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6191                                                   actions,
6192                                                   "action not supported");
6193                 }
6194         }
6195         /*
6196          * Validate actions in flow rules
6197          * - Explicit decap action is prohibited by the tunnel offload API.
6198          * - Drop action in tunnel steer rule is prohibited by the API.
6199          * - Application cannot use MARK action because its value can mask
6200          *   the tunnel default miss notification.
6201          * - JUMP in tunnel match rule has no support in current PMD
6202          *   implementation.
6203          * - TAG & META are reserved for future use.
6204          */
6205         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6206                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6207                                             MLX5_FLOW_ACTION_MARK     |
6208                                             MLX5_FLOW_ACTION_SET_TAG  |
6209                                             MLX5_FLOW_ACTION_SET_META |
6210                                             MLX5_FLOW_ACTION_DROP;
6211
6212                 if (action_flags & bad_actions_mask)
6213                         return rte_flow_error_set
6214                                         (error, EINVAL,
6215                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6216                                         "Invalid RTE action in tunnel "
6217                                         "set decap rule");
6218                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6219                         return rte_flow_error_set
6220                                         (error, EINVAL,
6221                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6222                                         "tunnel set decap rule must terminate "
6223                                         "with JUMP");
6224                 if (!attr->ingress)
6225                         return rte_flow_error_set
6226                                         (error, EINVAL,
6227                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6228                                         "tunnel flows for ingress traffic only");
6229         }
6230         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6231                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6232                                             MLX5_FLOW_ACTION_MARK    |
6233                                             MLX5_FLOW_ACTION_SET_TAG |
6234                                             MLX5_FLOW_ACTION_SET_META;
6235
6236                 if (action_flags & bad_actions_mask)
6237                         return rte_flow_error_set
6238                                         (error, EINVAL,
6239                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6240                                         "Invalid RTE action in tunnel "
6241                                         "set match rule");
6242         }
6243         /*
6244          * Validate the drop action mutual exclusion with other actions.
6245          * Drop action is mutually-exclusive with any other action, except for
6246          * Count action.
6247          * Drop action compatibility with tunnel offload was already validated.
6248          */
6249         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
6250                             MLX5_FLOW_ACTION_TUNNEL_SET));
6251         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6252             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6253                 return rte_flow_error_set(error, EINVAL,
6254                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6255                                           "Drop action is mutually-exclusive "
6256                                           "with any other action, except for "
6257                                           "Count action");
6258         /* Eswitch has a few restrictions on using items and actions. */
6259         if (attr->transfer) {
6260                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6261                     action_flags & MLX5_FLOW_ACTION_FLAG)
6262                         return rte_flow_error_set(error, ENOTSUP,
6263                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6264                                                   NULL,
6265                                                   "unsupported action FLAG");
6266                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6267                     action_flags & MLX5_FLOW_ACTION_MARK)
6268                         return rte_flow_error_set(error, ENOTSUP,
6269                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6270                                                   NULL,
6271                                                   "unsupported action MARK");
6272                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6273                         return rte_flow_error_set(error, ENOTSUP,
6274                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6275                                                   NULL,
6276                                                   "unsupported action QUEUE");
6277                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6278                         return rte_flow_error_set(error, ENOTSUP,
6279                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6280                                                   NULL,
6281                                                   "unsupported action RSS");
6282                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6283                         return rte_flow_error_set(error, EINVAL,
6284                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6285                                                   actions,
6286                                                   "no fate action is found");
6287         } else {
6288                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6289                         return rte_flow_error_set(error, EINVAL,
6290                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6291                                                   actions,
6292                                                   "no fate action is found");
6293         }
6294         /*
6295          * Continue validation for Xcap and VLAN actions.
6296          * If hairpin is working in explicit TX rule mode, there is no
6297          * action splitting and the validation of a hairpin ingress flow
6298          * should be the same as for other standard flows.
6299          */
6300         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6301                              MLX5_FLOW_VLAN_ACTIONS)) &&
6302             (queue_index == 0xFFFF ||
6303              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6304              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6305              conf->tx_explicit != 0))) {
6306                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6307                     MLX5_FLOW_XCAP_ACTIONS)
6308                         return rte_flow_error_set(error, ENOTSUP,
6309                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6310                                                   NULL, "encap and decap "
6311                                                   "combination isn't supported");
6312                 if (!attr->transfer && attr->ingress) {
6313                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6314                                 return rte_flow_error_set
6315                                                 (error, ENOTSUP,
6316                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6317                                                  NULL, "encap is not supported"
6318                                                  " for ingress traffic");
6319                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6320                                 return rte_flow_error_set
6321                                                 (error, ENOTSUP,
6322                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6323                                                  NULL, "push VLAN action not "
6324                                                  "supported for ingress");
6325                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6326                                         MLX5_FLOW_VLAN_ACTIONS)
6327                                 return rte_flow_error_set
6328                                                 (error, ENOTSUP,
6329                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6330                                                  NULL, "no support for "
6331                                                  "multiple VLAN actions");
6332                 }
6333         }
6334         /*
6335          * Hairpin flow will add one more TAG action in TX implicit mode.
6336          * In TX explicit mode, there will be no hairpin flow ID.
6337          */
6338         if (hairpin > 0)
6339                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6340         /* Extra metadata enabled: one more TAG action will be added. */
6341         if (dev_conf->dv_flow_en &&
6342             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6343             mlx5_flow_ext_mreg_supported(dev))
6344                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6345         if ((uint32_t)rw_act_num >
6346                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6347                 return rte_flow_error_set(error, ENOTSUP,
6348                                           RTE_FLOW_ERROR_TYPE_ACTION,
6349                                           NULL, "too many modify-header"
6350                                           " actions to support");
6351         }
6352         return 0;
6353 }
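
/*
 * Illustration of the accounting above: all modify-header actions are
 * merged into one device action when counted in actions_n, while
 * rw_act_num tracks the individual register writes. A hypothetical rule
 * such as
 *
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC, .conf = &ip },
 *             { .type = RTE_FLOW_ACTION_TYPE_SET_TP_DST, .conf = &tp },
 *             { .type = RTE_FLOW_ACTION_TYPE_DEC_TTL },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *
 * contributes 2 to actions_n (the merged modify-header action plus QUEUE)
 * but MLX5_ACT_NUM_MDF_IPV4 + MLX5_ACT_NUM_MDF_PORT + MLX5_ACT_NUM_MDF_TTL
 * to rw_act_num, which is checked at the end against
 * flow_dv_modify_hdr_action_max(dev, is_root).
 */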
6354
6355 /**
6356  * Internal preparation function. Allocates the DV flow structure;
6357  * its size is constant.
6358  *
6359  * @param[in] dev
6360  *   Pointer to the rte_eth_dev structure.
6361  * @param[in] attr
6362  *   Pointer to the flow attributes.
6363  * @param[in] items
6364  *   Pointer to the list of items.
6365  * @param[in] actions
6366  *   Pointer to the list of actions.
6367  * @param[out] error
6368  *   Pointer to the error structure.
6369  *
6370  * @return
6371  *   Pointer to mlx5_flow object on success,
6372  *   otherwise NULL and rte_errno is set.
6373  */
6374 static struct mlx5_flow *
6375 flow_dv_prepare(struct rte_eth_dev *dev,
6376                 const struct rte_flow_attr *attr __rte_unused,
6377                 const struct rte_flow_item items[] __rte_unused,
6378                 const struct rte_flow_action actions[] __rte_unused,
6379                 struct rte_flow_error *error)
6380 {
6381         uint32_t handle_idx = 0;
6382         struct mlx5_flow *dev_flow;
6383         struct mlx5_flow_handle *dev_handle;
6384         struct mlx5_priv *priv = dev->data->dev_private;
6385         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6386
6387         MLX5_ASSERT(wks);
6388         /* Guard against overflowing the flow workspace array. */
6389         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6390                 rte_flow_error_set(error, ENOSPC,
6391                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6392                                    "no free temporary device flow");
6393                 return NULL;
6394         }
6395         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6396                                    &handle_idx);
6397         if (!dev_handle) {
6398                 rte_flow_error_set(error, ENOMEM,
6399                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6400                                    "not enough memory to create flow handle");
6401                 return NULL;
6402         }
6403         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
6404         dev_flow = &wks->flows[wks->flow_idx++];
6405         memset(dev_flow, 0, sizeof(*dev_flow));
6406         dev_flow->handle = dev_handle;
6407         dev_flow->handle_idx = handle_idx;
6408         /*
6409          * Some old rdma-core releases check the length of the matching
6410          * parameter before continuing, so the length without the misc4
6411          * param is used by default. If the flow matches on misc4 fields,
6412          * the length needs to be adjusted accordingly. Each param member
6413          * is naturally aligned to a 64B boundary.
6414          */
6415         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6416                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6417         dev_flow->ingress = attr->ingress;
6418         dev_flow->dv.transfer = attr->transfer;
6419         return dev_flow;
6420 }
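
/*
 * A minimal sketch of the size arithmetic above, assuming the PRM layout
 * in which fte_match_param ends with the misc4 section:
 *
 *     value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
 *                  MLX5_ST_SZ_BYTES(fte_match_set_misc4);
 *
 * The default matcher length deliberately stops short of misc4; a
 * translation that really matches on misc4 fields is expected to grow
 * the size back to the full fte_match_param length so that the length
 * check in old rdma-core releases still passes.
 */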
6421
6422 #ifdef RTE_LIBRTE_MLX5_DEBUG
6423 /**
6424  * Sanity check for match mask and value. Similar to check_valid_spec() in
6425  * the kernel driver. It fails if an unmasked bit is set in the value.
6426  *
6427  * @param match_mask
6428  *   pointer to match mask buffer.
6429  * @param match_value
6430  *   pointer to match value buffer.
6431  *
6432  * @return
6433  *   0 if valid, -EINVAL otherwise.
6434  */
6435 static int
6436 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6437 {
6438         uint8_t *m = match_mask;
6439         uint8_t *v = match_value;
6440         unsigned int i;
6441
6442         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6443                 if (v[i] & ~m[i]) {
6444                         DRV_LOG(ERR,
6445                                 "match_value differs from match_criteria"
6446                                 " %p[%u] != %p[%u]",
6447                                 match_value, i, match_mask, i);
6448                         return -EINVAL;
6449                 }
6450         }
6451         return 0;
6452 }
6453 #endif
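
/*
 * Example of the invariant enforced above: with a mask byte of 0x0f and
 * a value byte of 0x1f, bit 4 is set in the value but not covered by the
 * mask (0x1f & ~0x0f == 0x10), so the check fails with -EINVAL. This is
 * why the translate helpers below AND every value with its mask before
 * writing it into the key.
 */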
6454
6455 /**
6456  * Add match of ip_version.
6457  *
6458  * @param[in] group
6459  *   Flow group.
6460  * @param[in] headers_v
6461  *   Values header pointer.
6462  * @param[in] headers_m
6463  *   Masks header pointer.
6464  * @param[in] ip_version
6465  *   The IP version to set.
6466  */
6467 static inline void
6468 flow_dv_set_match_ip_version(uint32_t group,
6469                              void *headers_v,
6470                              void *headers_m,
6471                              uint8_t ip_version)
6472 {
6473         if (group == 0)
6474                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6475         else
6476                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6477                          ip_version);
6478         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6479         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6480         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6481 }
6482
6483 /**
6484  * Add Ethernet item to matcher and to the value.
6485  *
6486  * @param[in, out] matcher
6487  *   Flow matcher.
6488  * @param[in, out] key
6489  *   Flow matcher value.
6490  * @param[in] item
6491  *   Flow pattern to translate.
6492  * @param[in] inner
6493  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
6494  */
6495 static void
6496 flow_dv_translate_item_eth(void *matcher, void *key,
6497                            const struct rte_flow_item *item, int inner,
6498                            uint32_t group)
6499 {
6500         const struct rte_flow_item_eth *eth_m = item->mask;
6501         const struct rte_flow_item_eth *eth_v = item->spec;
6502         const struct rte_flow_item_eth nic_mask = {
6503                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6504                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6505                 .type = RTE_BE16(0xffff),
6506                 .has_vlan = 0,
6507         };
6508         void *hdrs_m;
6509         void *hdrs_v;
6510         char *l24_v;
6511         unsigned int i;
6512
6513         if (!eth_v)
6514                 return;
6515         if (!eth_m)
6516                 eth_m = &nic_mask;
6517         if (inner) {
6518                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6519                                          inner_headers);
6520                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6521         } else {
6522                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6523                                          outer_headers);
6524                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6525         }
6526         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6527                &eth_m->dst, sizeof(eth_m->dst));
6528         /* The value must be in the range of the mask. */
6529         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6530         for (i = 0; i < sizeof(eth_m->dst); ++i)
6531                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6532         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6533                &eth_m->src, sizeof(eth_m->src));
6534         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6535         /* The value must be in the range of the mask. */
6536         for (i = 0; i < sizeof(eth_m->src); ++i)
6537                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6538         /*
6539          * HW supports match on one Ethertype, the Ethertype following the last
6540          * VLAN tag of the packet (see PRM).
6541          * Set match on ethertype only if ETH header is not followed by VLAN.
6542          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6543          * ethertype, and use ip_version field instead.
6544          * eCPRI over Ether layer will use type value 0xAEFE.
6545          */
6546         if (eth_m->type == 0xFFFF) {
6547                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
6548                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6549                 switch (eth_v->type) {
6550                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6551                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6552                         return;
6553                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6554                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6555                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6556                         return;
6557                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6558                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6559                         return;
6560                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6561                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6562                         return;
6563                 default:
6564                         break;
6565                 }
6566         }
6567         if (eth_m->has_vlan) {
6568                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6569                 if (eth_v->has_vlan) {
6570                         /*
6571                          * When has_more_vlan in the VLAN item is also not
6572                          * set, only single-tagged packets will be matched.
6573                          */
6574                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6575                         return;
6576                 }
6577         }
6578         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6579                  rte_be_to_cpu_16(eth_m->type));
6580         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6581         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6582 }
6583
6584 /**
6585  * Add VLAN item to matcher and to the value.
6586  *
6587  * @param[in, out] dev_flow
6588  *   Flow descriptor.
6589  * @param[in, out] matcher
6590  *   Flow matcher.
6591  * @param[in, out] key
6592  *   Flow matcher value.
6593  * @param[in] item
6594  *   Flow pattern to translate.
6595  * @param[in] inner
6596  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
6597  */
6598 static void
6599 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6600                             void *matcher, void *key,
6601                             const struct rte_flow_item *item,
6602                             int inner, uint32_t group)
6603 {
6604         const struct rte_flow_item_vlan *vlan_m = item->mask;
6605         const struct rte_flow_item_vlan *vlan_v = item->spec;
6606         void *hdrs_m;
6607         void *hdrs_v;
6608         uint16_t tci_m;
6609         uint16_t tci_v;
6610
6611         if (inner) {
6612                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6613                                          inner_headers);
6614                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6615         } else {
6616                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6617                                          outer_headers);
6618                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6619                 /*
6620                  * This is a workaround; masks are not supported
6621                  * here and have been pre-validated.
6622                  */
6623                 if (vlan_v)
6624                         dev_flow->handle->vf_vlan.tag =
6625                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6626         }
6627         /*
6628          * When VLAN item exists in flow, mark packet as tagged,
6629          * even if TCI is not specified.
6630          */
6631         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6632                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6633                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6634         }
6635         if (!vlan_v)
6636                 return;
6637         if (!vlan_m)
6638                 vlan_m = &rte_flow_item_vlan_mask;
6639         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6640         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6641         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6642         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6643         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6644         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6645         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6646         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6647         /*
6648          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6649          * ethertype, and use ip_version field instead.
6650          */
6651         if (vlan_m->inner_type == 0xFFFF) {
6652                 switch (vlan_v->inner_type) {
6653                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6654                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6655                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6656                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6657                         return;
6658                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6659                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6660                         return;
6661                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6662                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6663                         return;
6664                 default:
6665                         break;
6666                 }
6667         }
6668         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6669                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6670                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6671                 /* Only one vlan_tag bit can be set. */
6672                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6673                 return;
6674         }
6675         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6676                  rte_be_to_cpu_16(vlan_m->inner_type));
6677         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6678                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6679 }
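
/*
 * The TCI decomposition above follows the 802.1Q layout
 * PCP (3 bits) | DEI/CFI (1 bit) | VID (12 bits). E.g. for tci = 0xa123:
 *
 *     first_prio = 0xa123 >> 13 = 5
 *     first_cfi  = 0xa123 >> 12 = 0xa, clipped to the 1-bit field = 0
 *     first_vid  = 0xa123, clipped to the 12-bit field = 0x123
 *
 * MLX5_SET() truncates each value to its PRM field width, so no explicit
 * masking is needed here.
 */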
6680
6681 /**
6682  * Add IPV4 item to matcher and to the value.
6683  *
6684  * @param[in, out] matcher
6685  *   Flow matcher.
6686  * @param[in, out] key
6687  *   Flow matcher value.
6688  * @param[in] item
6689  *   Flow pattern to translate.
6690  * @param[in] inner
6691  *   Item is inner pattern.
6692  * @param[in] group
6693  *   The group to insert the rule.
6694  */
6695 static void
6696 flow_dv_translate_item_ipv4(void *matcher, void *key,
6697                             const struct rte_flow_item *item,
6698                             int inner, uint32_t group)
6699 {
6700         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6701         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6702         const struct rte_flow_item_ipv4 nic_mask = {
6703                 .hdr = {
6704                         .src_addr = RTE_BE32(0xffffffff),
6705                         .dst_addr = RTE_BE32(0xffffffff),
6706                         .type_of_service = 0xff,
6707                         .next_proto_id = 0xff,
6708                         .time_to_live = 0xff,
6709                 },
6710         };
6711         void *headers_m;
6712         void *headers_v;
6713         char *l24_m;
6714         char *l24_v;
6715         uint8_t tos;
6716
6717         if (inner) {
6718                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6719                                          inner_headers);
6720                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6721         } else {
6722                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6723                                          outer_headers);
6724                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6725         }
6726         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6727         if (!ipv4_v)
6728                 return;
6729         if (!ipv4_m)
6730                 ipv4_m = &nic_mask;
6731         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6732                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6733         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6734                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6735         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6736         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6737         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6738                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6739         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6740                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6741         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6742         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6743         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6744         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6745                  ipv4_m->hdr.type_of_service);
6746         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6747         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6748                  ipv4_m->hdr.type_of_service >> 2);
6749         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6750         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6751                  ipv4_m->hdr.next_proto_id);
6752         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6753                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6754         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6755                  ipv4_m->hdr.time_to_live);
6756         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6757                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6758         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6759                  !!(ipv4_m->hdr.fragment_offset));
6760         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6761                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6762 }
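
/*
 * The type_of_service handling above splits the TOS byte into its two
 * components: for tos = 0xb8 (DSCP EF), ip_dscp = 0xb8 >> 2 = 46, and
 * ip_ecn = 0xb8 truncated by MLX5_SET() to its 2-bit field = 0.
 */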
6763
6764 /**
6765  * Add IPV6 item to matcher and to the value.
6766  *
6767  * @param[in, out] matcher
6768  *   Flow matcher.
6769  * @param[in, out] key
6770  *   Flow matcher value.
6771  * @param[in] item
6772  *   Flow pattern to translate.
6773  * @param[in] inner
6774  *   Item is inner pattern.
6775  * @param[in] group
6776  *   The group to insert the rule.
6777  */
6778 static void
6779 flow_dv_translate_item_ipv6(void *matcher, void *key,
6780                             const struct rte_flow_item *item,
6781                             int inner, uint32_t group)
6782 {
6783         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6784         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6785         const struct rte_flow_item_ipv6 nic_mask = {
6786                 .hdr = {
6787                         .src_addr =
6788                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6789                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6790                         .dst_addr =
6791                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6792                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6793                         .vtc_flow = RTE_BE32(0xffffffff),
6794                         .proto = 0xff,
6795                         .hop_limits = 0xff,
6796                 },
6797         };
6798         void *headers_m;
6799         void *headers_v;
6800         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6801         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6802         char *l24_m;
6803         char *l24_v;
6804         uint32_t vtc_m;
6805         uint32_t vtc_v;
6806         int i;
6807         int size;
6808
6809         if (inner) {
6810                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6811                                          inner_headers);
6812                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6813         } else {
6814                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6815                                          outer_headers);
6816                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6817         }
6818         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6819         if (!ipv6_v)
6820                 return;
6821         if (!ipv6_m)
6822                 ipv6_m = &nic_mask;
6823         size = sizeof(ipv6_m->hdr.dst_addr);
6824         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6825                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6826         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6827                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6828         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6829         for (i = 0; i < size; ++i)
6830                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6831         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6832                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6833         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6834                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6835         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6836         for (i = 0; i < size; ++i)
6837                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6838         /* TOS. */
6839         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6840         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6841         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6842         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6843         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6844         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6845         /* Label. */
6846         if (inner) {
6847                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6848                          vtc_m);
6849                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6850                          vtc_v);
6851         } else {
6852                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6853                          vtc_m);
6854                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6855                          vtc_v);
6856         }
6857         /* Protocol. */
6858         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6859                  ipv6_m->hdr.proto);
6860         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6861                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6862         /* Hop limit. */
6863         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6864                  ipv6_m->hdr.hop_limits);
6865         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6866                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6867         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6868                  !!(ipv6_m->has_frag_ext));
6869         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6870                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6871 }
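
/*
 * The vtc_flow word handled above packs version (4 bits), traffic class
 * (8 bits) and flow label (20 bits). For vtc_flow = 0x6b812345, i.e.
 * version 6, traffic class 0xb8, label 0x12345:
 *
 *     ip_dscp    = 0x6b812345 >> 22, clipped to 6 bits  = 46
 *     ip_ecn     = 0x6b812345 >> 20, clipped to 2 bits  = 0
 *     flow label = 0x6b812345, clipped to 20 bits       = 0x12345
 */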
6872
6873 /**
6874  * Add IPV6 fragment extension item to matcher and to the value.
6875  *
6876  * @param[in, out] matcher
6877  *   Flow matcher.
6878  * @param[in, out] key
6879  *   Flow matcher value.
6880  * @param[in] item
6881  *   Flow pattern to translate.
6882  * @param[in] inner
6883  *   Item is inner pattern.
6884  */
6885 static void
6886 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6887                                      const struct rte_flow_item *item,
6888                                      int inner)
6889 {
6890         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6891         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6892         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6893                 .hdr = {
6894                         .next_header = 0xff,
6895                         .frag_data = RTE_BE16(0xffff),
6896                 },
6897         };
6898         void *headers_m;
6899         void *headers_v;
6900
6901         if (inner) {
6902                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6903                                          inner_headers);
6904                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6905         } else {
6906                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6907                                          outer_headers);
6908                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6909         }
6910         /* IPv6 fragment extension item exists, so packet is IP fragment. */
6911         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6912         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6913         if (!ipv6_frag_ext_v)
6914                 return;
6915         if (!ipv6_frag_ext_m)
6916                 ipv6_frag_ext_m = &nic_mask;
6917         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6918                  ipv6_frag_ext_m->hdr.next_header);
6919         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6920                  ipv6_frag_ext_v->hdr.next_header &
6921                  ipv6_frag_ext_m->hdr.next_header);
6922 }
6923
6924 /**
6925  * Add TCP item to matcher and to the value.
6926  *
6927  * @param[in, out] matcher
6928  *   Flow matcher.
6929  * @param[in, out] key
6930  *   Flow matcher value.
6931  * @param[in] item
6932  *   Flow pattern to translate.
6933  * @param[in] inner
6934  *   Item is inner pattern.
6935  */
6936 static void
6937 flow_dv_translate_item_tcp(void *matcher, void *key,
6938                            const struct rte_flow_item *item,
6939                            int inner)
6940 {
6941         const struct rte_flow_item_tcp *tcp_m = item->mask;
6942         const struct rte_flow_item_tcp *tcp_v = item->spec;
6943         void *headers_m;
6944         void *headers_v;
6945
6946         if (inner) {
6947                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6948                                          inner_headers);
6949                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6950         } else {
6951                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6952                                          outer_headers);
6953                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6954         }
6955         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6956         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6957         if (!tcp_v)
6958                 return;
6959         if (!tcp_m)
6960                 tcp_m = &rte_flow_item_tcp_mask;
6961         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6962                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6963         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6964                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6965         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6966                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6967         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6968                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6969         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6970                  tcp_m->hdr.tcp_flags);
6971         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6972                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6973 }
6974
6975 /**
6976  * Add UDP item to matcher and to the value.
6977  *
6978  * @param[in, out] matcher
6979  *   Flow matcher.
6980  * @param[in, out] key
6981  *   Flow matcher value.
6982  * @param[in] item
6983  *   Flow pattern to translate.
6984  * @param[in] inner
6985  *   Item is inner pattern.
6986  */
6987 static void
6988 flow_dv_translate_item_udp(void *matcher, void *key,
6989                            const struct rte_flow_item *item,
6990                            int inner)
6991 {
6992         const struct rte_flow_item_udp *udp_m = item->mask;
6993         const struct rte_flow_item_udp *udp_v = item->spec;
6994         void *headers_m;
6995         void *headers_v;
6996
6997         if (inner) {
6998                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6999                                          inner_headers);
7000                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7001         } else {
7002                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7003                                          outer_headers);
7004                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7005         }
7006         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7007         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
7008         if (!udp_v)
7009                 return;
7010         if (!udp_m)
7011                 udp_m = &rte_flow_item_udp_mask;
7012         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
7013                  rte_be_to_cpu_16(udp_m->hdr.src_port));
7014         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
7015                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
7016         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
7017                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
7018         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7019                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
7020 }
7021
7022 /**
7023  * Add GRE optional Key item to matcher and to the value.
7024  *
7025  * @param[in, out] matcher
7026  *   Flow matcher.
7027  * @param[in, out] key
7028  *   Flow matcher value.
7029  * @param[in] item
7030  *   Flow pattern to translate.
7033  */
7034 static void
7035 flow_dv_translate_item_gre_key(void *matcher, void *key,
7036                                    const struct rte_flow_item *item)
7037 {
7038         const rte_be32_t *key_m = item->mask;
7039         const rte_be32_t *key_v = item->spec;
7040         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7041         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7042         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
7043
7044         /* GRE K bit must be on and should already be validated */
7045         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
7046         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
7047         if (!key_v)
7048                 return;
7049         if (!key_m)
7050                 key_m = &gre_key_default_mask;
7051         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
7052                  rte_be_to_cpu_32(*key_m) >> 8);
7053         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
7054                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
7055         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
7056                  rte_be_to_cpu_32(*key_m) & 0xFF);
7057         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
7058                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
7059 }
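
/*
 * The PRM exposes the 32-bit GRE key as a 24-bit gre_key_h / 8-bit
 * gre_key_l pair, hence the split above: for key = 0x00abcdef,
 * gre_key_h = 0x00abcdef >> 8 = 0x00abcd and gre_key_l = 0xef.
 */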
7060
7061 /**
7062  * Add GRE item to matcher and to the value.
7063  *
7064  * @param[in, out] matcher
7065  *   Flow matcher.
7066  * @param[in, out] key
7067  *   Flow matcher value.
7068  * @param[in] item
7069  *   Flow pattern to translate.
7070  * @param[in] inner
7071  *   Item is inner pattern.
7072  */
7073 static void
7074 flow_dv_translate_item_gre(void *matcher, void *key,
7075                            const struct rte_flow_item *item,
7076                            int inner)
7077 {
7078         const struct rte_flow_item_gre *gre_m = item->mask;
7079         const struct rte_flow_item_gre *gre_v = item->spec;
7080         void *headers_m;
7081         void *headers_v;
7082         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7083         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7084         struct {
7085                 union {
7086                         __extension__
7087                         struct {
7088                                 uint16_t version:3;
7089                                 uint16_t rsvd0:9;
7090                                 uint16_t s_present:1;
7091                                 uint16_t k_present:1;
7092                                 uint16_t rsvd_bit1:1;
7093                                 uint16_t c_present:1;
7094                         };
7095                         uint16_t value;
7096                 };
7097         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
7098
7099         if (inner) {
7100                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7101                                          inner_headers);
7102                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7103         } else {
7104                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7105                                          outer_headers);
7106                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7107         }
7108         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7109         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
7110         if (!gre_v)
7111                 return;
7112         if (!gre_m)
7113                 gre_m = &rte_flow_item_gre_mask;
7114         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
7115                  rte_be_to_cpu_16(gre_m->protocol));
7116         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7117                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
7118         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
7119         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
7120         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
7121                  gre_crks_rsvd0_ver_m.c_present);
7122         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
7123                  gre_crks_rsvd0_ver_v.c_present &
7124                  gre_crks_rsvd0_ver_m.c_present);
7125         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
7126                  gre_crks_rsvd0_ver_m.k_present);
7127         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
7128                  gre_crks_rsvd0_ver_v.k_present &
7129                  gre_crks_rsvd0_ver_m.k_present);
7130         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
7131                  gre_crks_rsvd0_ver_m.s_present);
7132         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
7133                  gre_crks_rsvd0_ver_v.s_present &
7134                  gre_crks_rsvd0_ver_m.s_present);
7135 }
7136
7137 /**
7138  * Add NVGRE item to matcher and to the value.
7139  *
7140  * @param[in, out] matcher
7141  *   Flow matcher.
7142  * @param[in, out] key
7143  *   Flow matcher value.
7144  * @param[in] item
7145  *   Flow pattern to translate.
7146  * @param[in] inner
7147  *   Item is inner pattern.
7148  */
7149 static void
7150 flow_dv_translate_item_nvgre(void *matcher, void *key,
7151                              const struct rte_flow_item *item,
7152                              int inner)
7153 {
7154         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
7155         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
7156         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7157         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7158         const char *tni_flow_id_m;
7159         const char *tni_flow_id_v;
7160         char *gre_key_m;
7161         char *gre_key_v;
7162         int size;
7163         int i;
7164
7165         /* For NVGRE, GRE header fields must be set with defined values. */
7166         const struct rte_flow_item_gre gre_spec = {
7167                 .c_rsvd0_ver = RTE_BE16(0x2000),
7168                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7169         };
7170         const struct rte_flow_item_gre gre_mask = {
7171                 .c_rsvd0_ver = RTE_BE16(0xB000),
7172                 .protocol = RTE_BE16(UINT16_MAX),
7173         };
7174         const struct rte_flow_item gre_item = {
7175                 .spec = &gre_spec,
7176                 .mask = &gre_mask,
7177                 .last = NULL,
7178         };
7179         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7180         if (!nvgre_v)
7181                 return;
7182         if (!nvgre_m)
7183                 nvgre_m = &rte_flow_item_nvgre_mask;
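	/*
	 * The NVGRE TNI (24 bits) and flow_id (8 bits) are contiguous in the
	 * header and together fill the 32-bit GRE key field, so both are
	 * copied below as a single block starting at gre_key_h.
	 */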
7184         tni_flow_id_m = (const char *)nvgre_m->tni;
7185         tni_flow_id_v = (const char *)nvgre_v->tni;
7186         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7187         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7188         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7189         memcpy(gre_key_m, tni_flow_id_m, size);
7190         for (i = 0; i < size; ++i)
7191                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7192 }
7193
7194 /**
7195  * Add VXLAN item to matcher and to the value.
7196  *
7197  * @param[in, out] matcher
7198  *   Flow matcher.
7199  * @param[in, out] key
7200  *   Flow matcher value.
7201  * @param[in] item
7202  *   Flow pattern to translate.
7203  * @param[in] inner
7204  *   Item is inner pattern.
7205  */
7206 static void
7207 flow_dv_translate_item_vxlan(void *matcher, void *key,
7208                              const struct rte_flow_item *item,
7209                              int inner)
7210 {
7211         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7212         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7213         void *headers_m;
7214         void *headers_v;
7215         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7216         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7217         char *vni_m;
7218         char *vni_v;
7219         uint16_t dport;
7220         int size;
7221         int i;
7222
7223         if (inner) {
7224                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7225                                          inner_headers);
7226                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7227         } else {
7228                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7229                                          outer_headers);
7230                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7231         }
7232         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7233                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
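	/*
	 * If no preceding UDP item matched an explicit destination port,
	 * enforce the default VXLAN/VXLAN-GPE UDP port match here.
	 */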
7234         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7235                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7236                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7237         }
7238         if (!vxlan_v)
7239                 return;
7240         if (!vxlan_m)
7241                 vxlan_m = &rte_flow_item_vxlan_mask;
7242         size = sizeof(vxlan_m->vni);
7243         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7244         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7245         memcpy(vni_m, vxlan_m->vni, size);
7246         for (i = 0; i < size; ++i)
7247                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7248 }
7249
7250 /**
7251  * Add VXLAN-GPE item to matcher and to the value.
7252  *
7253  * @param[in, out] matcher
7254  *   Flow matcher.
7255  * @param[in, out] key
7256  *   Flow matcher value.
7257  * @param[in] item
7258  *   Flow pattern to translate.
7259  * @param[in] inner
7260  *   Item is inner pattern.
7261  */
7263 static void
7264 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7265                                  const struct rte_flow_item *item, int inner)
7266 {
7267         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7268         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7269         void *headers_m;
7270         void *headers_v;
7271         void *misc_m =
7272                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7273         void *misc_v =
7274                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7275         char *vni_m;
7276         char *vni_v;
7277         uint16_t dport;
7278         int size;
7279         int i;
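	/*
	 * Default flags match: value 0x0c with a full 0xff mask, which
	 * corresponds to the VXLAN-GPE I and P bits being set.
	 */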
7280         uint8_t flags_m = 0xff;
7281         uint8_t flags_v = 0xc;
7282
7283         if (inner) {
7284                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7285                                          inner_headers);
7286                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7287         } else {
7288                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7289                                          outer_headers);
7290                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7291         }
7292         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7293                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7294         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7295                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7296                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7297         }
7298         if (!vxlan_v)
7299                 return;
7300         if (!vxlan_m)
7301                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7302         size = sizeof(vxlan_m->vni);
7303         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7304         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7305         memcpy(vni_m, vxlan_m->vni, size);
7306         for (i = 0; i < size; ++i)
7307                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7308         if (vxlan_m->flags) {
7309                 flags_m = vxlan_m->flags;
7310                 flags_v = vxlan_v->flags;
7311         }
7312         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7313         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7314         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7315                  vxlan_m->protocol);
7316         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7317                  vxlan_v->protocol);
7318 }
7319
7320 /**
7321  * Add Geneve item to matcher and to the value.
7322  *
7323  * @param[in, out] matcher
7324  *   Flow matcher.
7325  * @param[in, out] key
7326  *   Flow matcher value.
7327  * @param[in] item
7328  *   Flow pattern to translate.
7329  * @param[in] inner
7330  *   Item is inner pattern.
7331  */
7333 static void
7334 flow_dv_translate_item_geneve(void *matcher, void *key,
7335                               const struct rte_flow_item *item, int inner)
7336 {
7337         const struct rte_flow_item_geneve *geneve_m = item->mask;
7338         const struct rte_flow_item_geneve *geneve_v = item->spec;
7339         void *headers_m;
7340         void *headers_v;
7341         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7342         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7343         uint16_t dport;
7344         uint16_t gbhdr_m;
7345         uint16_t gbhdr_v;
7346         char *vni_m;
7347         char *vni_v;
7348         size_t size, i;
7349
7350         if (inner) {
7351                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7352                                          inner_headers);
7353                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7354         } else {
7355                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7356                                          outer_headers);
7357                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7358         }
7359         dport = MLX5_UDP_PORT_GENEVE;
7360         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7361                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7362                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7363         }
7364         if (!geneve_v)
7365                 return;
7366         if (!geneve_m)
7367                 geneve_m = &rte_flow_item_geneve_mask;
7368         size = sizeof(geneve_m->vni);
7369         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7370         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7371         memcpy(vni_m, geneve_m->vni, size);
7372         for (i = 0; i < size; ++i)
7373                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7374         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7375                  rte_be_to_cpu_16(geneve_m->protocol));
7376         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7377                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
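	/* The OAM flag and option length are carried in ver_opt_len_o_c_rsvd0. */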
7378         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7379         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
7380         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7381                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7382         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7383                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7384         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7385                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7386         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7387                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7388                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7389 }
7390
7391 /**
7392  * Create Geneve TLV option resource.
7393  *
7394  * @param dev[in, out]
7395  *   Pointer to rte_eth_dev structure.
7396  * @param[in, out] tag_be24
7397  *   Tag value in big endian then R-shift 8.
7398  * @parm[in, out] dev_flow
7399  *   Pointer to the dev_flow.
7400  * @param[out] error
7401  *   pointer to error structure.
7402  *
7403  * @return
7404  *   0 on success otherwise -errno and errno is set.
7405  */
7407 int
7408 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
7409                                              const struct rte_flow_item *item,
7410                                              struct rte_flow_error *error)
7411 {
7412         struct mlx5_priv *priv = dev->data->dev_private;
7413         struct mlx5_dev_ctx_shared *sh = priv->sh;
7414         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
7415                         sh->geneve_tlv_option_resource;
7416         struct mlx5_devx_obj *obj;
7417         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
7418         int ret = 0;
7419
7420         if (!geneve_opt_v)
7421                 return -1;
7422         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
7423         if (geneve_opt_resource != NULL) {
7424                 if (geneve_opt_resource->option_class ==
7425                         geneve_opt_v->option_class &&
7426                         geneve_opt_resource->option_type ==
7427                         geneve_opt_v->option_type &&
7428                         geneve_opt_resource->length ==
7429                         geneve_opt_v->option_len) {
7430                         /* We already have GENEVE TLV option obj allocated. */
7431                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
7432                                            __ATOMIC_RELAXED);
7433                 } else {
7434                         ret = rte_flow_error_set(error, ENOMEM,
7435                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7436                                 "Only one GENEVE TLV option supported");
7437                         goto exit;
7438                 }
7439         } else {
7440                 /* Create a GENEVE TLV object and resource. */
7441                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
7442                                 geneve_opt_v->option_class,
7443                                 geneve_opt_v->option_type,
7444                                 geneve_opt_v->option_len);
7445                 if (!obj) {
7446                         ret = rte_flow_error_set(error, ENODATA,
7447                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7448                                 "Failed to create GENEVE TLV Devx object");
7449                         goto exit;
7450                 }
7451                 sh->geneve_tlv_option_resource =
7452                                 mlx5_malloc(MLX5_MEM_ZERO,
7453                                                 sizeof(*geneve_opt_resource),
7454                                                 0, SOCKET_ID_ANY);
7455                 if (!sh->geneve_tlv_option_resource) {
7456                         claim_zero(mlx5_devx_cmd_destroy(obj));
7457                         ret = rte_flow_error_set(error, ENOMEM,
7458                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7459                                 "GENEVE TLV object memory allocation failed");
7460                         goto exit;
7461                 }
7462                 geneve_opt_resource = sh->geneve_tlv_option_resource;
7463                 geneve_opt_resource->obj = obj;
7464                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
7465                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
7466                 geneve_opt_resource->length = geneve_opt_v->option_len;
7467                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
7468                                 __ATOMIC_RELAXED);
7469         }
7470 exit:
7471         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
7472         return ret;
7473 }
7474
7475 /**
7476  * Add Geneve TLV option item to matcher.
7477  *
7478  * @param[in, out] dev
7479  *   Pointer to rte_eth_dev structure.
7480  * @param[in, out] matcher
7481  *   Flow matcher.
7482  * @param[in, out] key
7483  *   Flow matcher value.
7484  * @param[in] item
7485  *   Flow pattern to translate.
7486  * @param[out] error
7487  *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
7488  */
7489 static int
7490 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
7491                                   void *key, const struct rte_flow_item *item,
7492                                   struct rte_flow_error *error)
7493 {
7494         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
7495         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
7496         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7497         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7498         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7499                         misc_parameters_3);
7500         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7501         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
7502         int ret = 0;
7503
7504         if (!geneve_opt_v)
7505                 return -1;
7506         if (!geneve_opt_m)
7507                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
7508         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
7509                                                            error);
7510         if (ret) {
7511                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
7512                 return ret;
7513         }
7514         /*
7515          * Set the option length in GENEVE header if not requested.
7516          * The GENEVE TLV option length is expressed by the option length field
7517          * in the GENEVE header.
7518          * If the option length was not requested but the GENEVE TLV option item
7519          * is present we set the option length field implicitly.
7520          */
7521         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
7522                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7523                          MLX5_GENEVE_OPTLEN_MASK);
7524                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7525                          geneve_opt_v->option_len + 1);
7526         }
7527         /* Set the data. */
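	/*
	 * option_len counts 4-byte words; the asserts below reflect that
	 * only the first data DW (geneve_tlv_option_0_data) can be matched.
	 */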
7528         if (geneve_opt_v->data) {
7529                 memcpy(&opt_data_key, geneve_opt_v->data,
7530                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
7531                                 sizeof(opt_data_key)));
7532                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
7533                                 sizeof(opt_data_key));
7534                 memcpy(&opt_data_mask, geneve_opt_m->data,
7535                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
7536                                 sizeof(opt_data_mask)));
7537                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
7538                                 sizeof(opt_data_mask));
7539                 MLX5_SET(fte_match_set_misc3, misc3_m,
7540                                 geneve_tlv_option_0_data,
7541                                 rte_be_to_cpu_32(opt_data_mask));
7542                 MLX5_SET(fte_match_set_misc3, misc3_v,
7543                                 geneve_tlv_option_0_data,
7544                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
7545         }
7546         return ret;
7547 }
7548
7549 /**
7550  * Add MPLS item to matcher and to the value.
7551  *
7552  * @param[in, out] matcher
7553  *   Flow matcher.
7554  * @param[in, out] key
7555  *   Flow matcher value.
7556  * @param[in] item
7557  *   Flow pattern to translate.
7558  * @param[in] prev_layer
7559  *   The protocol layer indicated in previous item.
7560  * @param[in] inner
7561  *   Item is inner pattern.
7562  */
7563 static void
7564 flow_dv_translate_item_mpls(void *matcher, void *key,
7565                             const struct rte_flow_item *item,
7566                             uint64_t prev_layer,
7567                             int inner)
7568 {
7569         const uint32_t *in_mpls_m = item->mask;
7570         const uint32_t *in_mpls_v = item->spec;
7571         uint32_t *out_mpls_m = NULL;
7572         uint32_t *out_mpls_v = NULL;
7573         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7574         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7575         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7576                                      misc_parameters_2);
7577         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7578         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7579         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7580
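	/*
	 * Match the encapsulating protocol according to the previous layer:
	 * UDP destination port, GRE protocol type, or IP protocol.
	 */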
7581         switch (prev_layer) {
7582         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7583                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7584                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7585                          MLX5_UDP_PORT_MPLS);
7586                 break;
7587         case MLX5_FLOW_LAYER_GRE:
7588                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7589                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7590                          RTE_ETHER_TYPE_MPLS);
7591                 break;
7592         default:
7593                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7594                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7595                          IPPROTO_MPLS);
7596                 break;
7597         }
7598         if (!in_mpls_v)
7599                 return;
7600         if (!in_mpls_m)
7601                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7602         switch (prev_layer) {
7603         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7604                 out_mpls_m =
7605                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7606                                                  outer_first_mpls_over_udp);
7607                 out_mpls_v =
7608                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7609                                                  outer_first_mpls_over_udp);
7610                 break;
7611         case MLX5_FLOW_LAYER_GRE:
7612                 out_mpls_m =
7613                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7614                                                  outer_first_mpls_over_gre);
7615                 out_mpls_v =
7616                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7617                                                  outer_first_mpls_over_gre);
7618                 break;
7619         default:
7620                 /* Inner MPLS not over GRE is not supported. */
7621                 if (!inner) {
7622                         out_mpls_m =
7623                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7624                                                          misc2_m,
7625                                                          outer_first_mpls);
7626                         out_mpls_v =
7627                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7628                                                          misc2_v,
7629                                                          outer_first_mpls);
7630                 }
7631                 break;
7632         }
7633         if (out_mpls_m && out_mpls_v) {
7634                 *out_mpls_m = *in_mpls_m;
7635                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7636         }
7637 }
7638
7639 /**
7640  * Add metadata register item to matcher
7641  *
7642  * @param[in, out] matcher
7643  *   Flow matcher.
7644  * @param[in, out] key
7645  *   Flow matcher value.
7646  * @param[in] reg_type
7647  *   Type of device metadata register.
7648  * @param[in] data
7649  *   Register value.
7650  * @param[in] mask
7651  *   Register mask.
7652  */
7653 static void
7654 flow_dv_match_meta_reg(void *matcher, void *key,
7655                        enum modify_reg reg_type,
7656                        uint32_t data, uint32_t mask)
7657 {
7658         void *misc2_m =
7659                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7660         void *misc2_v =
7661                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7662         uint32_t temp;
7663
7664         data &= mask;
7665         switch (reg_type) {
7666         case REG_A:
7667                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7668                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7669                 break;
7670         case REG_B:
7671                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7672                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7673                 break;
7674         case REG_C_0:
7675                 /*
7676                  * The metadata register C0 field might be divided into
7677                  * source vport index and META item value, we should set
7678                  * this field according to specified mask, not as whole one.
7679                  */
7680                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7681                 temp |= mask;
7682                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7683                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7684                 temp &= ~mask;
7685                 temp |= data;
7686                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7687                 break;
7688         case REG_C_1:
7689                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7690                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7691                 break;
7692         case REG_C_2:
7693                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7694                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7695                 break;
7696         case REG_C_3:
7697                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7698                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7699                 break;
7700         case REG_C_4:
7701                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7702                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7703                 break;
7704         case REG_C_5:
7705                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7706                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7707                 break;
7708         case REG_C_6:
7709                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7710                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7711                 break;
7712         case REG_C_7:
7713                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7714                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7715                 break;
7716         default:
7717                 MLX5_ASSERT(false);
7718                 break;
7719         }
7720 }
7721
7722 /**
7723  * Add MARK item to matcher
7724  *
7725  * @param[in] dev
7726  *   The device to configure through.
7727  * @param[in, out] matcher
7728  *   Flow matcher.
7729  * @param[in, out] key
7730  *   Flow matcher value.
7731  * @param[in] item
7732  *   Flow pattern to translate.
7733  */
7734 static void
7735 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7736                             void *matcher, void *key,
7737                             const struct rte_flow_item *item)
7738 {
7739         struct mlx5_priv *priv = dev->data->dev_private;
7740         const struct rte_flow_item_mark *mark;
7741         uint32_t value;
7742         uint32_t mask;
7743
7744         mark = item->mask ? (const void *)item->mask :
7745                             &rte_flow_item_mark_mask;
7746         mask = mark->id & priv->sh->dv_mark_mask;
7747         mark = (const void *)item->spec;
7748         MLX5_ASSERT(mark);
7749         value = mark->id & priv->sh->dv_mark_mask & mask;
7750         if (mask) {
7751                 enum modify_reg reg;
7752
7753                 /* Get the metadata register index for the mark. */
7754                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7755                 MLX5_ASSERT(reg > 0);
7756                 if (reg == REG_C_0) {
7757                         struct mlx5_priv *priv = dev->data->dev_private;
7758                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7759                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7760
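			/*
			 * Keep only the bits within the application-owned
			 * part of register C0 and shift them into place.
			 */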
7761                         mask &= msk_c0;
7762                         mask <<= shl_c0;
7763                         value <<= shl_c0;
7764                 }
7765                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7766         }
7767 }
7768
7769 /**
7770  * Add META item to matcher
7771  *
7772  * @param[in] dev
7773  *   The device to configure through.
7774  * @param[in, out] matcher
7775  *   Flow matcher.
7776  * @param[in, out] key
7777  *   Flow matcher value.
7778  * @param[in] attr
7779  *   Attributes of flow that includes this item.
7780  * @param[in] item
7781  *   Flow pattern to translate.
7782  */
7783 static void
7784 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7785                             void *matcher, void *key,
7786                             const struct rte_flow_attr *attr,
7787                             const struct rte_flow_item *item)
7788 {
7789         const struct rte_flow_item_meta *meta_m;
7790         const struct rte_flow_item_meta *meta_v;
7791
7792         meta_m = (const void *)item->mask;
7793         if (!meta_m)
7794                 meta_m = &rte_flow_item_meta_mask;
7795         meta_v = (const void *)item->spec;
7796         if (meta_v) {
7797                 int reg;
7798                 uint32_t value = meta_v->data;
7799                 uint32_t mask = meta_m->data;
7800
7801                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7802                 if (reg < 0)
7803                         return;
7804                 MLX5_ASSERT(reg != REG_NON);
7805                 /*
7806                  * In datapath code there are no endianness
7807                  * conversions for performance reasons, all
7808                  * pattern conversions are done in rte_flow.
7809                  */
7810                 value = rte_cpu_to_be_32(value);
7811                 mask = rte_cpu_to_be_32(mask);
7812                 if (reg == REG_C_0) {
7813                         struct mlx5_priv *priv = dev->data->dev_private;
7814                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7815                         uint32_t shl_c0 = rte_bsf32(msk_c0);
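			/*
			 * Realign the BE-converted value so that the META
			 * bits land in the C0 sub-field selected by
			 * dv_regc0_mask.
			 */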
7816 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7817                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7818
7819                         value >>= shr_c0;
7820                         mask >>= shr_c0;
7821 #endif
7822                         value <<= shl_c0;
7823                         mask <<= shl_c0;
7824                         MLX5_ASSERT(msk_c0);
7825                         MLX5_ASSERT(!(~msk_c0 & mask));
7826                 }
7827                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7828         }
7829 }
7830
7831 /**
7832  * Add vport metadata Reg C0 item to matcher
7833  *
7834  * @param[in, out] matcher
7835  *   Flow matcher.
7836  * @param[in, out] key
7837  *   Flow matcher value.
7838  * @param[in] value
7839  *   Register value to match.
 * @param[in] mask
 *   Register mask.
7840  */
7841 static void
7842 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7843                                   uint32_t value, uint32_t mask)
7844 {
7845         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7846 }
7847
7848 /**
7849  * Add tag item to matcher
7850  *
7851  * @param[in] dev
7852  *   The device to configure through.
7853  * @param[in, out] matcher
7854  *   Flow matcher.
7855  * @param[in, out] key
7856  *   Flow matcher value.
7857  * @param[in] item
7858  *   Flow pattern to translate.
7859  */
7860 static void
7861 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7862                                 void *matcher, void *key,
7863                                 const struct rte_flow_item *item)
7864 {
7865         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7866         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7867         uint32_t mask, value;
7868
7869         MLX5_ASSERT(tag_v);
7870         value = tag_v->data;
7871         mask = tag_m ? tag_m->data : UINT32_MAX;
7872         if (tag_v->id == REG_C_0) {
7873                 struct mlx5_priv *priv = dev->data->dev_private;
7874                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7875                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7876
7877                 mask &= msk_c0;
7878                 mask <<= shl_c0;
7879                 value <<= shl_c0;
7880         }
7881         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7882 }
7883
7884 /**
7885  * Add TAG item to matcher
7886  *
7887  * @param[in] dev
7888  *   The device to configure through.
7889  * @param[in, out] matcher
7890  *   Flow matcher.
7891  * @param[in, out] key
7892  *   Flow matcher value.
7893  * @param[in] item
7894  *   Flow pattern to translate.
7895  */
7896 static void
7897 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7898                            void *matcher, void *key,
7899                            const struct rte_flow_item *item)
7900 {
7901         const struct rte_flow_item_tag *tag_v = item->spec;
7902         const struct rte_flow_item_tag *tag_m = item->mask;
7903         enum modify_reg reg;
7904
7905         MLX5_ASSERT(tag_v);
7906         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7907         /* Get the metadata register index for the tag. */
7908         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7909         MLX5_ASSERT(reg > 0);
7910         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7911 }
7912
7913 /**
7914  * Add source vport match to the specified matcher.
7915  *
7916  * @param[in, out] matcher
7917  *   Flow matcher.
7918  * @param[in, out] key
7919  *   Flow matcher value.
7920  * @param[in] port
7921  *   Source vport value to match.
7922  * @param[in] mask
7923  *   Mask.
7924  */
7925 static void
7926 flow_dv_translate_item_source_vport(void *matcher, void *key,
7927                                     int16_t port, uint16_t mask)
7928 {
7929         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7930         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7931
7932         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7933         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7934 }
7935
7936 /**
7937  * Translate port-id item to eswitch match on port-id.
7938  *
7939  * @param[in] dev
7940  *   The device to configure through.
7941  * @param[in, out] matcher
7942  *   Flow matcher.
7943  * @param[in, out] key
7944  *   Flow matcher value.
7945  * @param[in] item
7946  *   Flow pattern to translate.
7947  * @param[in] attr
7948  *   Flow attributes.
7949  *
7950  * @return
7951  *   0 on success, a negative errno value otherwise.
7952  */
7953 static int
7954 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7955                                void *key, const struct rte_flow_item *item,
7956                                const struct rte_flow_attr *attr)
7957 {
7958         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7959         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7960         struct mlx5_priv *priv;
7961         uint16_t mask, id;
7962
7963         mask = pid_m ? pid_m->id : 0xffff;
7964         id = pid_v ? pid_v->id : dev->data->port_id;
7965         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7966         if (!priv)
7967                 return -rte_errno;
7968         /*
7969          * Translate to vport field or to metadata, depending on mode.
7970          * Kernel can use either misc.source_port or half of C0 metadata
7971          * register.
7972          */
7973         if (priv->vport_meta_mask) {
7974                 /*
7975                  * Provide the hint for SW steering library
7976                  * to insert the flow into ingress domain and
7977                  * save the extra vport match.
7978                  */
7979                 if (mask == 0xffff && priv->vport_id == 0xffff &&
7980                     priv->pf_bond < 0 && attr->transfer)
7981                         flow_dv_translate_item_source_vport
7982                                 (matcher, key, priv->vport_id, mask);
7983                 else
7984                         flow_dv_translate_item_meta_vport
7985                                 (matcher, key,
7986                                  priv->vport_meta_tag,
7987                                  priv->vport_meta_mask);
7988         } else {
7989                 flow_dv_translate_item_source_vport(matcher, key,
7990                                                     priv->vport_id, mask);
7991         }
7992         return 0;
7993 }
7994
7995 /**
7996  * Add ICMP6 item to matcher and to the value.
7997  *
7998  * @param[in, out] matcher
7999  *   Flow matcher.
8000  * @param[in, out] key
8001  *   Flow matcher value.
8002  * @param[in] item
8003  *   Flow pattern to translate.
8004  * @param[in] inner
8005  *   Item is inner pattern.
8006  */
8007 static void
8008 flow_dv_translate_item_icmp6(void *matcher, void *key,
8009                               const struct rte_flow_item *item,
8010                               int inner)
8011 {
8012         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
8013         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
8014         void *headers_m;
8015         void *headers_v;
8016         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8017                                      misc_parameters_3);
8018         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8019         if (inner) {
8020                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8021                                          inner_headers);
8022                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8023         } else {
8024                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8025                                          outer_headers);
8026                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8027         }
8028         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8029         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
8030         if (!icmp6_v)
8031                 return;
8032         if (!icmp6_m)
8033                 icmp6_m = &rte_flow_item_icmp6_mask;
8034         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
8035         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
8036                  icmp6_v->type & icmp6_m->type);
8037         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
8038         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
8039                  icmp6_v->code & icmp6_m->code);
8040 }
8041
8042 /**
8043  * Add ICMP item to matcher and to the value.
8044  *
8045  * @param[in, out] matcher
8046  *   Flow matcher.
8047  * @param[in, out] key
8048  *   Flow matcher value.
8049  * @param[in] item
8050  *   Flow pattern to translate.
8051  * @param[in] inner
8052  *   Item is inner pattern.
8053  */
8054 static void
8055 flow_dv_translate_item_icmp(void *matcher, void *key,
8056                             const struct rte_flow_item *item,
8057                             int inner)
8058 {
8059         const struct rte_flow_item_icmp *icmp_m = item->mask;
8060         const struct rte_flow_item_icmp *icmp_v = item->spec;
8061         uint32_t icmp_header_data_m = 0;
8062         uint32_t icmp_header_data_v = 0;
8063         void *headers_m;
8064         void *headers_v;
8065         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8066                                      misc_parameters_3);
8067         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8068         if (inner) {
8069                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8070                                          inner_headers);
8071                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8072         } else {
8073                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8074                                          outer_headers);
8075                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8076         }
8077         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8078         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
8079         if (!icmp_v)
8080                 return;
8081         if (!icmp_m)
8082                 icmp_m = &rte_flow_item_icmp_mask;
8083         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
8084                  icmp_m->hdr.icmp_type);
8085         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
8086                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
8087         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
8088                  icmp_m->hdr.icmp_code);
8089         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
8090                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
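	/*
	 * The icmp_header_data field packs the sequence number in its low
	 * 16 bits and the identifier in its high 16 bits.
	 */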
8091         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
8092         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
8093         if (icmp_header_data_m) {
8094                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
8095                 icmp_header_data_v |=
8096                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
8097                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
8098                          icmp_header_data_m);
8099                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
8100                          icmp_header_data_v & icmp_header_data_m);
8101         }
8102 }
8103
8104 /**
8105  * Add GTP item to matcher and to the value.
8106  *
8107  * @param[in, out] matcher
8108  *   Flow matcher.
8109  * @param[in, out] key
8110  *   Flow matcher value.
8111  * @param[in] item
8112  *   Flow pattern to translate.
8113  * @param[in] inner
8114  *   Item is inner pattern.
8115  */
8116 static void
8117 flow_dv_translate_item_gtp(void *matcher, void *key,
8118                            const struct rte_flow_item *item, int inner)
8119 {
8120         const struct rte_flow_item_gtp *gtp_m = item->mask;
8121         const struct rte_flow_item_gtp *gtp_v = item->spec;
8122         void *headers_m;
8123         void *headers_v;
8124         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8125                                      misc_parameters_3);
8126         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8127         uint16_t dport = RTE_GTPU_UDP_PORT;
8128
8129         if (inner) {
8130                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8131                                          inner_headers);
8132                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8133         } else {
8134                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8135                                          outer_headers);
8136                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8137         }
8138         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8139                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8140                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8141         }
8142         if (!gtp_v)
8143                 return;
8144         if (!gtp_m)
8145                 gtp_m = &rte_flow_item_gtp_mask;
8146         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
8147                  gtp_m->v_pt_rsv_flags);
8148         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
8149                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
8150         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
8151         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
8152                  gtp_v->msg_type & gtp_m->msg_type);
8153         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
8154                  rte_be_to_cpu_32(gtp_m->teid));
8155         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
8156                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
8157 }
8158
8159 /**
8160  * Add GTP PSC item to matcher.
8161  *
8162  * @param[in, out] matcher
8163  *   Flow matcher.
8164  * @param[in, out] key
8165  *   Flow matcher value.
8166  * @param[in] item
8167  *   Flow pattern to translate.
 *
 * @return
 *   Always 0.
8168  */
8169 static int
8170 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
8171                                const struct rte_flow_item *item)
8172 {
8173         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
8174         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
8175         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8176                         misc_parameters_3);
8177         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8178         union {
8179                 uint32_t w32;
8180                 struct {
8181                         uint16_t seq_num;
8182                         uint8_t npdu_num;
8183                         uint8_t next_ext_header_type;
8184                 };
8185         } dw_2;
8186         uint8_t gtp_flags;
8187
8188         /* Always set E-flag match on one, regardless of GTP item settings. */
8189         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
8190         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
8191         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
8192         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
8193         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
8194         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
8195         /* Set next extension header type. */
8196         dw_2.seq_num = 0;
8197         dw_2.npdu_num = 0;
8198         dw_2.next_ext_header_type = 0xff;
8199         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
8200                  rte_cpu_to_be_32(dw_2.w32));
8201         dw_2.seq_num = 0;
8202         dw_2.npdu_num = 0;
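	/*
	 * 0x85 is the GTP-U next-extension-header type of the PDU session
	 * container.
	 */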
8203         dw_2.next_ext_header_type = 0x85;
8204         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
8205                  rte_cpu_to_be_32(dw_2.w32));
8206         if (gtp_psc_v) {
8207                 union {
8208                         uint32_t w32;
8209                         struct {
8210                                 uint8_t len;
8211                                 uint8_t type_flags;
8212                                 uint8_t qfi;
8213                                 uint8_t reserved;
8214                         };
8215                 } dw_0;
8216
8217                 /* Set extension header PDU type and QoS. */
8218                 if (!gtp_psc_m)
8219                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
8220                 dw_0.w32 = 0;
8221                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
8222                 dw_0.qfi = gtp_psc_m->qfi;
8223                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
8224                          rte_cpu_to_be_32(dw_0.w32));
8225                 dw_0.w32 = 0;
8226                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
8227                                                         gtp_psc_m->pdu_type);
8228                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
8229                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
8230                          rte_cpu_to_be_32(dw_0.w32));
8231         }
8232         return 0;
8233 }
8234
8235 /**
8236  * Add eCPRI item to matcher and to the value.
8237  *
8238  * @param[in] dev
8239  *   The device to configure through.
8240  * @param[in, out] matcher
8241  *   Flow matcher.
8242  * @param[in, out] key
8243  *   Flow matcher value.
8244  * @param[in] item
8245  *   Flow pattern to translate.
8246  * @param[in] samples
8247  *   Sample IDs to be used in the matching.
8248  */
8249 static void
8250 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
8251                              void *key, const struct rte_flow_item *item)
8252 {
8253         struct mlx5_priv *priv = dev->data->dev_private;
8254         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
8255         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
8256         struct rte_ecpri_common_hdr common;
8257         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
8258                                      misc_parameters_4);
8259         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
8260         uint32_t *samples;
8261         void *dw_m;
8262         void *dw_v;
8263
8264         if (!ecpri_v)
8265                 return;
8266         if (!ecpri_m)
8267                 ecpri_m = &rte_flow_item_ecpri_mask;
8268         /*
8269          * At most four DW samples are supported in a single matching now.
8270          * Two are used now for eCPRI matching:
8271          * 1. Type: one byte, mask should be 0x00ff0000 in network order
8272          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
8273          *    if any.
8274          */
8275         if (!ecpri_m->hdr.common.u32)
8276                 return;
8277         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
8278         /* Need to take the whole DW as the mask to fill the entry. */
8279         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
8280                             prog_sample_field_value_0);
8281         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
8282                             prog_sample_field_value_0);
8283         /* Already big endian (network order) in the header. */
8284         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
8285         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
8286         /* Sample#0, used for matching type, offset 0. */
8287         MLX5_SET(fte_match_set_misc4, misc4_m,
8288                  prog_sample_field_id_0, samples[0]);
8289         /* It makes no sense to set the sample ID in the mask field. */
8290         MLX5_SET(fte_match_set_misc4, misc4_v,
8291                  prog_sample_field_id_0, samples[0]);
8292         /*
8293          * Check whether the message body part needs to be matched.
8294          * Wildcard rules matching only the type field should be supported.
8295          */
8296         if (ecpri_m->hdr.dummy[0]) {
8297                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
8298                 switch (common.type) {
8299                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
8300                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
8301                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
8302                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
8303                                             prog_sample_field_value_1);
8304                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
8305                                             prog_sample_field_value_1);
8306                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
8307                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
8308                                             ecpri_m->hdr.dummy[0];
8309                         /* Sample#1, to match message body, offset 4. */
8310                         MLX5_SET(fte_match_set_misc4, misc4_m,
8311                                  prog_sample_field_id_1, samples[1]);
8312                         MLX5_SET(fte_match_set_misc4, misc4_v,
8313                                  prog_sample_field_id_1, samples[1]);
8314                         break;
8315                 default:
8316                         /* Others, do not match any sample ID. */
8317                         break;
8318                 }
8319         }
8320 }
8321
8322 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
8323
8324 #define HEADER_IS_ZERO(match_criteria, headers)                              \
8325         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
8326                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
8327
8328 /**
8329  * Calculate flow matcher enable bitmap.
8330  *
8331  * @param match_criteria
8332  *   Pointer to flow matcher criteria.
8333  *
8334  * @return
8335  *   Bitmap of enabled fields.
8336  */
8337 static uint8_t
8338 flow_dv_matcher_enable(uint32_t *match_criteria)
8339 {
8340         uint8_t match_criteria_enable;
8341
8342         match_criteria_enable =
8343                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
8344                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
8345         match_criteria_enable |=
8346                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
8347                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
8348         match_criteria_enable |=
8349                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
8350                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
8351         match_criteria_enable |=
8352                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
8353                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
8354         match_criteria_enable |=
8355                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
8356                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
8357         match_criteria_enable |=
8358                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
8359                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
8360         return match_criteria_enable;
8361 }
8362
8363 struct mlx5_hlist_entry *
8364 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
8365 {
8366         struct mlx5_dev_ctx_shared *sh = list->ctx;
8367         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8368         struct rte_eth_dev *dev = ctx->dev;
8369         struct mlx5_flow_tbl_data_entry *tbl_data;
8370         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
8371         struct rte_flow_error *error = ctx->error;
8372         union mlx5_flow_tbl_key key = { .v64 = key64 };
8373         struct mlx5_flow_tbl_resource *tbl;
8374         void *domain;
8375         uint32_t idx = 0;
8376         int ret;
8377
8378         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
8379         if (!tbl_data) {
8380                 rte_flow_error_set(error, ENOMEM,
8381                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8382                                    NULL,
8383                                    "cannot allocate flow table data entry");
8384                 return NULL;
8385         }
8386         tbl_data->idx = idx;
8387         tbl_data->tunnel = tt_prm->tunnel;
8388         tbl_data->group_id = tt_prm->group_id;
8389         tbl_data->external = !!tt_prm->external;
8390         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
8391         tbl_data->is_egress = !!key.direction;
8392         tbl_data->is_transfer = !!key.domain;
8393         tbl_data->dummy = !!key.dummy;
8394         tbl_data->table_id = key.table_id;
8395         tbl = &tbl_data->tbl;
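	/* Dummy entries carry metadata only; no HW table object is created. */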
8396         if (key.dummy)
8397                 return &tbl_data->entry;
8398         if (key.domain)
8399                 domain = sh->fdb_domain;
8400         else if (key.direction)
8401                 domain = sh->tx_domain;
8402         else
8403                 domain = sh->rx_domain;
8404         ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
8405         if (ret) {
8406                 rte_flow_error_set(error, ENOMEM,
8407                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8408                                    NULL, "cannot create flow table object");
8409                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8410                 return NULL;
8411         }
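	/*
	 * The root table (id 0) cannot be a jump destination, so the jump
	 * action is created for non-root tables only.
	 */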
8412         if (key.table_id) {
8413                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
8414                                         (tbl->obj, &tbl_data->jump.action);
8415                 if (ret) {
8416                         rte_flow_error_set(error, ENOMEM,
8417                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8418                                            NULL,
8419                                            "cannot create flow jump action");
8420                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
8421                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8422                         return NULL;
8423                 }
8424         }
8425         MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
8426               key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
8427               key.table_id);
8428         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
8429                              flow_dv_matcher_create_cb,
8430                              flow_dv_matcher_match_cb,
8431                              flow_dv_matcher_remove_cb);
8432         return &tbl_data->entry;
8433 }
8434
8435 int
8436 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
8437                      struct mlx5_hlist_entry *entry, uint64_t key64,
8438                      void *cb_ctx __rte_unused)
8439 {
8440         struct mlx5_flow_tbl_data_entry *tbl_data =
8441                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8442         union mlx5_flow_tbl_key key = { .v64 = key64 };
8443
8444         return tbl_data->table_id != key.table_id ||
8445                tbl_data->dummy != key.dummy ||
8446                tbl_data->is_transfer != key.domain ||
8447                tbl_data->is_egress != key.direction;
8448 }
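
     /*
      * Illustrative sketch (not part of the driver) of how the 64-bit key
      * compared above is composed; the field values are made up and "sh" is
      * assumed to be a valid shared context pointer.
      *
      *     union mlx5_flow_tbl_key key = {
      *         {
      *             .table_id = 3,
      *             .dummy = 0,
      *             .domain = 1,
      *             .direction = 0,
      *         }
      *     };
      *     struct mlx5_hlist_entry *he;
      *
      *     he = mlx5_hlist_lookup(sh->flow_tbls, key.v64, NULL);
      *
      * With .domain = 1 and .direction = 0 the key addresses FDB table #3.
      */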
8449
8450 /**
8451  * Get a flow table.
8452  *
8453  * @param[in, out] dev
8454  *   Pointer to rte_eth_dev structure.
8455  * @param[in] table_id
8456  *   Table id to use.
8457  * @param[in] egress
8458  *   Direction of the table.
8459  * @param[in] transfer
8460  *   E-Switch or NIC flow.
      * @param[in] external
      *   True if the table is requested by an application flow rule.
      * @param[in] tunnel
      *   Tunnel offload context, NULL if none.
      * @param[in] group_id
      *   Flow group ID.
8461  * @param[in] dummy
8462  *   Dummy entry for dv API.
8463  * @param[out] error
8464  *   Pointer to error structure.
8465  *
8466  * @return
8467  *   Returns the table resource on success, NULL in case of failure.
8468  */
8469 struct mlx5_flow_tbl_resource *
8470 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8471                          uint32_t table_id, uint8_t egress,
8472                          uint8_t transfer,
8473                          bool external,
8474                          const struct mlx5_flow_tunnel *tunnel,
8475                          uint32_t group_id, uint8_t dummy,
8476                          struct rte_flow_error *error)
8477 {
8478         struct mlx5_priv *priv = dev->data->dev_private;
8479         union mlx5_flow_tbl_key table_key = {
8480                 {
8481                         .table_id = table_id,
8482                         .dummy = dummy,
8483                         .domain = !!transfer,
8484                         .direction = !!egress,
8485                 }
8486         };
8487         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8488                 .tunnel = tunnel,
8489                 .group_id = group_id,
8490                 .external = external,
8491         };
8492         struct mlx5_flow_cb_ctx ctx = {
8493                 .dev = dev,
8494                 .error = error,
8495                 .data = &tt_prm,
8496         };
8497         struct mlx5_hlist_entry *entry;
8498         struct mlx5_flow_tbl_data_entry *tbl_data;
8499
8500         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8501         if (!entry) {
8502                 rte_flow_error_set(error, ENOMEM,
8503                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8504                                    "cannot get table");
8505                 return NULL;
8506         }
8507         DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
8508                 table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
8509         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8510         return &tbl_data->tbl;
8511 }
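
     /*
      * Illustrative usage sketch (not part of the driver), assuming "dev"
      * and "error" are in scope: take a reference on NIC Rx table #1 and
      * drop it once the dependent resources are gone.
      *
      *     struct mlx5_flow_tbl_resource *tbl;
      *
      *     tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
      *                                    0, 0, error);
      *     if (!tbl)
      *         return -rte_errno;
      *     (... use tbl->obj ...)
      *     flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
      */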
8512
8513 void
8514 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
8515                       struct mlx5_hlist_entry *entry)
8516 {
8517         struct mlx5_dev_ctx_shared *sh = list->ctx;
8518         struct mlx5_flow_tbl_data_entry *tbl_data =
8519                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8520
8521         MLX5_ASSERT(entry && sh);
8522         if (tbl_data->jump.action)
8523                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
8524         if (tbl_data->tbl.obj)
8525                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
8526         if (tbl_data->tunnel_offload && tbl_data->external) {
8527                 struct mlx5_hlist_entry *he;
8528                 struct mlx5_hlist *tunnel_grp_hash;
8529                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8530                 union tunnel_tbl_key tunnel_key = {
8531                         .tunnel_id = tbl_data->tunnel ?
8532                                         tbl_data->tunnel->tunnel_id : 0,
8533                         .group = tbl_data->group_id
8534                 };
8535                 uint32_t table_id = tbl_data->table_id;
8536
8537                 tunnel_grp_hash = tbl_data->tunnel ?
8538                                         tbl_data->tunnel->groups :
8539                                         thub->groups;
8540                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
8541                 if (he)
8542                         mlx5_hlist_unregister(tunnel_grp_hash, he);
8543                 DRV_LOG(DEBUG,
8544                         "Table_id %u tunnel %u group %u released.",
8545                         table_id,
8546                         tbl_data->tunnel ?
8547                         tbl_data->tunnel->tunnel_id : 0,
8548                         tbl_data->group_id);
8549         }
8550         mlx5_cache_list_destroy(&tbl_data->matchers);
8551         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
8552 }
8553
8554 /**
8555  * Release a flow table.
8556  *
8557  * @param[in] sh
8558  *   Pointer to device shared structure.
8559  * @param[in] tbl
8560  *   Table resource to be released.
8561  *
8562  * @return
8563  *   Returns 0 if the table was released, 1 otherwise.
8564  */
8565 static int
8566 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8567                              struct mlx5_flow_tbl_resource *tbl)
8568 {
8569         struct mlx5_flow_tbl_data_entry *tbl_data =
8570                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8571
8572         if (!tbl)
8573                 return 0;
8574         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8575 }
8576
8577 int
8578 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8579                          struct mlx5_cache_entry *entry, void *cb_ctx)
8580 {
8581         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8582         struct mlx5_flow_dv_matcher *ref = ctx->data;
8583         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8584                                                         entry);
8585
8586         return cur->crc != ref->crc ||
8587                cur->priority != ref->priority ||
8588                memcmp((const void *)cur->mask.buf,
8589                       (const void *)ref->mask.buf, ref->mask.size);
8590 }
8591
8592 struct mlx5_cache_entry *
8593 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
8594                           struct mlx5_cache_entry *entry __rte_unused,
8595                           void *cb_ctx)
8596 {
8597         struct mlx5_dev_ctx_shared *sh = list->ctx;
8598         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8599         struct mlx5_flow_dv_matcher *ref = ctx->data;
8600         struct mlx5_flow_dv_matcher *cache;
8601         struct mlx5dv_flow_matcher_attr dv_attr = {
8602                 .type = IBV_FLOW_ATTR_NORMAL,
8603                 .match_mask = (void *)&ref->mask,
8604         };
8605         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
8606                                                             typeof(*tbl), tbl);
8607         int ret;
8608
8609         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
8610         if (!cache) {
8611                 rte_flow_error_set(ctx->error, ENOMEM,
8612                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8613                                    "cannot create matcher");
8614                 return NULL;
8615         }
8616         *cache = *ref;
8617         dv_attr.match_criteria_enable =
8618                 flow_dv_matcher_enable(cache->mask.buf);
8619         dv_attr.priority = ref->priority;
8620         if (tbl->is_egress)
8621                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8622         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
8623                                                &cache->matcher_object);
8624         if (ret) {
8625                 mlx5_free(cache);
8626                 rte_flow_error_set(ctx->error, ENOMEM,
8627                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8628                                    "cannot create matcher");
8629                 return NULL;
8630         }
8631         return &cache->entry;
8632 }
8633
8634 /**
8635  * Register the flow matcher.
8636  *
8637  * @param[in, out] dev
8638  *   Pointer to rte_eth_dev structure.
8639  * @param[in, out] ref
8640  *   Pointer to the flow matcher to register.
8641  * @param[in, out] key
8642  *   Pointer to the flow table key.
8643  * @param[in, out] dev_flow
8644  *   Pointer to the dev_flow.
      * @param[in] tunnel
      *   Tunnel offload context, NULL if none.
      * @param[in] group_id
      *   Flow group ID.
8645  * @param[out] error
8646  *   Pointer to error structure.
8647  *
8648  * @return
8649  *   0 on success, a negative errno value otherwise and rte_errno is set.
8650  */
8651 static int
8652 flow_dv_matcher_register(struct rte_eth_dev *dev,
8653                          struct mlx5_flow_dv_matcher *ref,
8654                          union mlx5_flow_tbl_key *key,
8655                          struct mlx5_flow *dev_flow,
8656                          const struct mlx5_flow_tunnel *tunnel,
8657                          uint32_t group_id,
8658                          struct rte_flow_error *error)
8659 {
8660         struct mlx5_cache_entry *entry;
8661         struct mlx5_flow_dv_matcher *cache;
8662         struct mlx5_flow_tbl_resource *tbl;
8663         struct mlx5_flow_tbl_data_entry *tbl_data;
8664         struct mlx5_flow_cb_ctx ctx = {
8665                 .error = error,
8666                 .data = ref,
8667         };
8668
8669         /*
8670          * The tunnel offload API requires this registration for cases when
8671          * a tunnel match rule was inserted before the tunnel set rule.
8672          */
8673         tbl = flow_dv_tbl_resource_get(dev, key->table_id,
8674                                        key->direction, key->domain,
8675                                        dev_flow->external, tunnel,
8676                                        group_id, 0, error);
8677         if (!tbl)
8678                 return -rte_errno;      /* No need to refill the error info */
8679         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8680         ref->tbl = tbl;
8681         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8682         if (!entry) {
8683                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8684                 return rte_flow_error_set(error, ENOMEM,
8685                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8686                                           "cannot allocate ref memory");
8687         }
8688         cache = container_of(entry, typeof(*cache), entry);
8689         dev_flow->handle->dvh.matcher = cache;
8690         return 0;
8691 }
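
     /*
      * Hedged sketch of a matcher registration as issued by the translate
      * stage (illustrative only; "dev", "attr", "dev_flow", "tunnel" and
      * "error" are assumed in scope, and "matcher" is a mlx5_flow_dv_matcher
      * whose crc, priority and mask were already filled in):
      *
      *     union mlx5_flow_tbl_key tbl_key = {
      *         {
      *             .table_id = dev_flow->dv.group,
      *             .dummy = 0,
      *             .domain = !!attr->transfer,
      *             .direction = !!attr->egress,
      *         }
      *     };
      *
      *     if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
      *                                  tunnel, dev_flow->dv.group, error))
      *         return -rte_errno;
      */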
8692
8693 struct mlx5_hlist_entry *
8694 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8695 {
8696         struct mlx5_dev_ctx_shared *sh = list->ctx;
8697         struct rte_flow_error *error = ctx;
8698         struct mlx5_flow_dv_tag_resource *entry;
8699         uint32_t idx = 0;
8700         int ret;
8701
8702         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8703         if (!entry) {
8704                 rte_flow_error_set(error, ENOMEM,
8705                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8706                                    "cannot allocate resource memory");
8707                 return NULL;
8708         }
8709         entry->idx = idx;
8710         entry->tag_id = key;
8711         ret = mlx5_flow_os_create_flow_action_tag(key,
8712                                                   &entry->action);
8713         if (ret) {
8714                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8715                 rte_flow_error_set(error, ENOMEM,
8716                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8717                                    NULL, "cannot create action");
8718                 return NULL;
8719         }
8720         return &entry->entry;
8721 }
8722
8723 int
8724 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
8725                      struct mlx5_hlist_entry *entry, uint64_t key,
8726                      void *cb_ctx __rte_unused)
8727 {
8728         struct mlx5_flow_dv_tag_resource *tag =
8729                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8730
8731         return key != tag->tag_id;
8732 }
8733
8734 /**
8735  * Find existing tag resource or create and register a new one.
8736  *
8737  * @param[in, out] dev
8738  *   Pointer to rte_eth_dev structure.
8739  * @param[in] tag_be24
8740  *   Tag value in big-endian byte order, right-shifted by 8 bits.
8741  * @param[in, out] dev_flow
8742  *   Pointer to the dev_flow.
8743  * @param[out] error
8744  *   Pointer to error structure.
8745  *
8746  * @return
8747  *   0 on success, a negative errno value otherwise and rte_errno is set.
8748  */
8749 static int
8750 flow_dv_tag_resource_register
8751                         (struct rte_eth_dev *dev,
8752                          uint32_t tag_be24,
8753                          struct mlx5_flow *dev_flow,
8754                          struct rte_flow_error *error)
8755 {
8756         struct mlx5_priv *priv = dev->data->dev_private;
8757         struct mlx5_flow_dv_tag_resource *cache_resource;
8758         struct mlx5_hlist_entry *entry;
8759
8760         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8761         if (entry) {
8762                 cache_resource = container_of
8763                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8764                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8765                 dev_flow->dv.tag_resource = cache_resource;
8766                 return 0;
8767         }
8768         return -rte_errno;
8769 }
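
     /*
      * Illustrative sketch (assuming "dev", "dev_flow" and "error" are in
      * scope) of registering the tag resource for a MARK action; the
      * 24-bit value is produced by mlx5_flow_mark_set(), exactly as in the
      * sample translation below.
      *
      *     uint32_t tag_be24 = mlx5_flow_mark_set(42);
      *
      *     if (flow_dv_tag_resource_register(dev, tag_be24, dev_flow,
      *                                       error))
      *         return -rte_errno;
      *     (dev_flow->dv.tag_resource->action is now ready to be attached.)
      */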
8770
8771 void
8772 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8773                       struct mlx5_hlist_entry *entry)
8774 {
8775         struct mlx5_dev_ctx_shared *sh = list->ctx;
8776         struct mlx5_flow_dv_tag_resource *tag =
8777                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8778
8779         MLX5_ASSERT(tag && sh && tag->action);
8780         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8781         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8782         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8783 }
8784
8785 /**
8786  * Release the tag.
8787  *
8788  * @param dev
8789  *   Pointer to Ethernet device.
8790  * @param tag_idx
8791  *   Tag index.
8792  *
8793  * @return
8794  *   1 while a reference on it exists, 0 when freed.
8795  */
8796 static int
8797 flow_dv_tag_release(struct rte_eth_dev *dev,
8798                     uint32_t tag_idx)
8799 {
8800         struct mlx5_priv *priv = dev->data->dev_private;
8801         struct mlx5_flow_dv_tag_resource *tag;
8802
8803         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8804         if (!tag)
8805                 return 0;
8806         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8807                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8808         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8809 }
8810
8811 /**
8812  * Translate port ID action to vport.
8813  *
8814  * @param[in] dev
8815  *   Pointer to rte_eth_dev structure.
8816  * @param[in] action
8817  *   Pointer to the port ID action.
8818  * @param[out] dst_port_id
8819  *   The target port ID.
8820  * @param[out] error
8821  *   Pointer to the error structure.
8822  *
8823  * @return
8824  *   0 on success, a negative errno value otherwise and rte_errno is set.
8825  */
8826 static int
8827 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8828                                  const struct rte_flow_action *action,
8829                                  uint32_t *dst_port_id,
8830                                  struct rte_flow_error *error)
8831 {
8832         uint32_t port;
8833         struct mlx5_priv *priv;
8834         const struct rte_flow_action_port_id *conf =
8835                         (const struct rte_flow_action_port_id *)action->conf;
8836
8837         port = conf->original ? dev->data->port_id : conf->id;
8838         priv = mlx5_port_to_eswitch_info(port, false);
8839         if (!priv)
8840                 return rte_flow_error_set(error, rte_errno,
8841                                           RTE_FLOW_ERROR_TYPE_ACTION,
8842                                           NULL,
8843                                           "No eswitch info was found for port");
8844 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8845         /*
8846          * This parameter is transferred to
8847          * mlx5dv_dr_action_create_dest_ib_port().
8848          */
8849         *dst_port_id = priv->dev_port;
8850 #else
8851         /*
8852          * Legacy mode, no LAG configuration is supported.
8853          * This parameter is transferred to
8854          * mlx5dv_dr_action_create_dest_vport().
8855          */
8856         *dst_port_id = priv->vport_id;
8857 #endif
8858         return 0;
8859 }
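
     /*
      * Minimal sketch (not part of the driver) of resolving a PORT_ID
      * action, assuming "dev", "error" and "actions" (pointing at a
      * RTE_FLOW_ACTION_TYPE_PORT_ID entry) are in scope:
      *
      *     uint32_t port_id = 0;
      *
      *     if (flow_dv_translate_action_port_id(dev, actions, &port_id,
      *                                          error))
      *         return -rte_errno;
      *
      * The resulting "port_id" is either the E-Switch dev_port or the
      * vport ID, depending on HAVE_MLX5DV_DR_DEVX_PORT.
      */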
8860
8861 /**
8862  * Create a counter with aging configuration.
8863  *
8864  * @param[in] dev
8865  *   Pointer to rte_eth_dev structure.
      * @param[in, out] dev_flow
      *   Pointer to the mlx5_flow.
8866  * @param[in] count
8867  *   Pointer to the counter action configuration.
8868  * @param[in] age
8869  *   Pointer to the aging action configuration.
8870  *
8871  * @return
8872  *   Index to flow counter on success, 0 otherwise.
8873  */
8874 static uint32_t
8875 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8876                                 struct mlx5_flow *dev_flow,
8877                                 const struct rte_flow_action_count *count,
8878                                 const struct rte_flow_action_age *age)
8879 {
8880         uint32_t counter;
8881         struct mlx5_age_param *age_param;
8882
8883         if (count && count->shared)
8884                 counter = flow_dv_counter_get_shared(dev, count->id);
8885         else
8886                 counter = flow_dv_counter_alloc(dev, !!age);
8887         if (!counter || age == NULL)
8888                 return counter;
8889         age_param = flow_dv_counter_idx_get_age(dev, counter);
8890         age_param->context = age->context ? age->context :
8891                 (void *)(uintptr_t)(dev_flow->flow_idx);
8892         age_param->timeout = age->timeout;
8893         age_param->port_id = dev->data->port_id;
8894         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8895         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8896         return counter;
8897 }
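
     /*
      * Hedged sketch of creating an aging counter, mirroring what the
      * COUNT/AGE translation does (illustrative; "dev", "dev_flow" and
      * "error" are assumed in scope):
      *
      *     const struct rte_flow_action_count cnt = { .shared = 0, .id = 0 };
      *     const struct rte_flow_action_age age = {
      *         .timeout = 10,
      *         .context = NULL,
      *     };
      *     uint32_t counter;
      *
      *     counter = flow_dv_translate_create_counter(dev, dev_flow,
      *                                                &cnt, &age);
      *     if (!counter)
      *         return rte_flow_error_set(error, rte_errno,
      *                                   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
      *                                   "cannot create counter object");
      */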
8898
8899 /**
8900  * Add Tx queue matcher
8901  *
8902  * @param[in] dev
8903  *   Pointer to the dev struct.
8904  * @param[in, out] matcher
8905  *   Flow matcher.
8906  * @param[in, out] key
8907  *   Flow matcher value.
8908  * @param[in] item
8909  *   Flow pattern to translate.
8912  */
8913 static void
8914 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8915                                 void *matcher, void *key,
8916                                 const struct rte_flow_item *item)
8917 {
8918         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8919         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8920         void *misc_m =
8921                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8922         void *misc_v =
8923                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8924         struct mlx5_txq_ctrl *txq;
8925         uint32_t queue;
8926
8928         queue_m = (const void *)item->mask;
8929         if (!queue_m)
8930                 return;
8931         queue_v = (const void *)item->spec;
8932         if (!queue_v)
8933                 return;
8934         txq = mlx5_txq_get(dev, queue_v->queue);
8935         if (!txq)
8936                 return;
8937         queue = txq->obj->sq->id;
8938         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8939         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8940                  queue & queue_m->queue);
8941         mlx5_txq_release(dev, queue_v->queue);
8942 }
8943
8944 /**
8945  * Set the hash fields according to the @p dev_flow information.
8946  *
8947  * @param[in] dev_flow
8948  *   Pointer to the mlx5_flow.
8949  * @param[in] rss_desc
8950  *   Pointer to the mlx5_flow_rss_desc.
8951  */
8952 static void
8953 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8954                        struct mlx5_flow_rss_desc *rss_desc)
8955 {
8956         uint64_t items = dev_flow->handle->layers;
8957         int rss_inner = 0;
8958         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8959
8960         dev_flow->hash_fields = 0;
8961 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8962         if (rss_desc->level >= 2) {
8963                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8964                 rss_inner = 1;
8965         }
8966 #endif
8967         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8968             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8969                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8970                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8971                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8972                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8973                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8974                         else
8975                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8976                 }
8977         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8978                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8979                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8980                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8981                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8982                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8983                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8984                         else
8985                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8986                 }
8987         }
8988         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8989             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8990                 if (rss_types & ETH_RSS_UDP) {
8991                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8992                                 dev_flow->hash_fields |=
8993                                                 IBV_RX_HASH_SRC_PORT_UDP;
8994                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8995                                 dev_flow->hash_fields |=
8996                                                 IBV_RX_HASH_DST_PORT_UDP;
8997                         else
8998                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8999                 }
9000         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
9001                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
9002                 if (rss_types & ETH_RSS_TCP) {
9003                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
9004                                 dev_flow->hash_fields |=
9005                                                 IBV_RX_HASH_SRC_PORT_TCP;
9006                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
9007                                 dev_flow->hash_fields |=
9008                                                 IBV_RX_HASH_DST_PORT_TCP;
9009                         else
9010                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
9011                 }
9012         }
9013 }
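
     /*
      * Worked example (illustrative) of the selection above for a flow
      * with outer IPv4 + UDP layers:
      *
      *     rss_desc->types == ETH_RSS_IP
      *         -> hash_fields == MLX5_IPV4_IBV_RX_HASH
      *            (source and destination IPv4 addresses)
      *     rss_desc->types == ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY
      *         -> hash_fields == IBV_RX_HASH_SRC_IPV4
      *
      * UDP ports are only folded in when ETH_RSS_UDP is requested as well.
      */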
9014
9015 /**
9016  * Prepare an Rx Hash queue.
9017  *
9018  * @param dev
9019  *   Pointer to Ethernet device.
9020  * @param[in] dev_flow
9021  *   Pointer to the mlx5_flow.
9022  * @param[in] rss_desc
9023  *   Pointer to the mlx5_flow_rss_desc.
9024  * @param[out] hrxq_idx
9025  *   Hash Rx queue index.
9026  *
9027  * @return
9028  *   The hash Rx queue object initialized, NULL otherwise and rte_errno is set.
9029  */
9030 static struct mlx5_hrxq *
9031 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
9032                      struct mlx5_flow *dev_flow,
9033                      struct mlx5_flow_rss_desc *rss_desc,
9034                      uint32_t *hrxq_idx)
9035 {
9036         struct mlx5_priv *priv = dev->data->dev_private;
9037         struct mlx5_flow_handle *dh = dev_flow->handle;
9038         struct mlx5_hrxq *hrxq;
9039
9040         MLX5_ASSERT(rss_desc->queue_num);
9041         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
9042         rss_desc->hash_fields = dev_flow->hash_fields;
9043         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
9044         rss_desc->shared_rss = 0;
9045         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
9046         if (!*hrxq_idx)
9047                 return NULL;
9048         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
9049                               *hrxq_idx);
9050         return hrxq;
9051 }
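
     /*
      * Minimal sketch (assuming "dev", "dev_flow", "error" and an
      * initialized "rss_desc" from the thread workspace are in scope) of
      * how the sample path below obtains a fate queue:
      *
      *     uint32_t hrxq_idx;
      *     struct mlx5_hrxq *hrxq;
      *
      *     hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
      *     if (!hrxq)
      *         return rte_flow_error_set(error, rte_errno,
      *                                   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
      *                                   "cannot create fate queue");
      *
      * hrxq->action then feeds the DR rule; the reference is dropped with
      * mlx5_hrxq_release(dev, hrxq_idx) on teardown.
      */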
9052
9053 /**
9054  * Release sample sub-action resources.
9055  *
9056  * @param[in, out] dev
9057  *   Pointer to rte_eth_dev structure.
9058  * @param[in] act_res
9059  *   Pointer to sample sub-action resources.
9060  */
9061 static void
9062 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
9063                                    struct mlx5_flow_sub_actions_idx *act_res)
9064 {
9065         if (act_res->rix_hrxq) {
9066                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
9067                 act_res->rix_hrxq = 0;
9068         }
9069         if (act_res->rix_encap_decap) {
9070                 flow_dv_encap_decap_resource_release(dev,
9071                                                      act_res->rix_encap_decap);
9072                 act_res->rix_encap_decap = 0;
9073         }
9074         if (act_res->rix_port_id_action) {
9075                 flow_dv_port_id_action_resource_release(dev,
9076                                                 act_res->rix_port_id_action);
9077                 act_res->rix_port_id_action = 0;
9078         }
9079         if (act_res->rix_tag) {
9080                 flow_dv_tag_release(dev, act_res->rix_tag);
9081                 act_res->rix_tag = 0;
9082         }
9083         if (act_res->rix_jump) {
9084                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
9085                 act_res->rix_jump = 0;
9086         }
9087 }
9088
9089 int
9090 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
9091                         struct mlx5_cache_entry *entry, void *cb_ctx)
9092 {
9093         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9094         struct rte_eth_dev *dev = ctx->dev;
9095         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9096         struct mlx5_flow_dv_sample_resource *cache_resource =
9097                         container_of(entry, typeof(*cache_resource), entry);
9098
9099         if (resource->ratio == cache_resource->ratio &&
9100             resource->ft_type == cache_resource->ft_type &&
9101             resource->ft_id == cache_resource->ft_id &&
9102             resource->set_action == cache_resource->set_action &&
9103             !memcmp((void *)&resource->sample_act,
9104                     (void *)&cache_resource->sample_act,
9105                     sizeof(struct mlx5_flow_sub_actions_list))) {
9106                 /*
9107                  * A matching entry already exists: release the
9108                  * sub-action references prepared for the new resource.
9109                  */
9110                 flow_dv_sample_sub_actions_release(dev,
9111                                                 &resource->sample_idx);
9112                 return 0;
9113         }
9114         return 1;
9115 }
9116
9117 struct mlx5_cache_entry *
9118 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
9119                          struct mlx5_cache_entry *entry __rte_unused,
9120                          void *cb_ctx)
9121 {
9122         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9123         struct rte_eth_dev *dev = ctx->dev;
9124         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9125         void **sample_dv_actions = resource->sub_actions;
9126         struct mlx5_flow_dv_sample_resource *cache_resource;
9127         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
9128         struct mlx5_priv *priv = dev->data->dev_private;
9129         struct mlx5_dev_ctx_shared *sh = priv->sh;
9130         struct mlx5_flow_tbl_resource *tbl;
9131         uint32_t idx = 0;
9132         const uint32_t next_ft_step = 1;
9133         uint32_t next_ft_id = resource->ft_id + next_ft_step;
9134         uint8_t is_egress = 0;
9135         uint8_t is_transfer = 0;
9136         struct rte_flow_error *error = ctx->error;
9137
9138         /* Register new sample resource. */
9139         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
9140         if (!cache_resource) {
9141                 rte_flow_error_set(error, ENOMEM,
9142                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9143                                           NULL,
9144                                           "cannot allocate resource memory");
9145                 return NULL;
9146         }
9147         *cache_resource = *resource;
9148         /* Create normal path table level */
9149         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
9150                 is_transfer = 1;
9151         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
9152                 is_egress = 1;
9153         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
9154                                         is_egress, is_transfer,
9155                                         true, NULL, 0, 0, error);
9156         if (!tbl) {
9157                 rte_flow_error_set(error, ENOMEM,
9158                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9159                                           NULL,
9160                                           "failed to create normal path table "
9161                                           "for sample");
9162                 goto error;
9163         }
9164         cache_resource->normal_path_tbl = tbl;
9165         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
9166                 if (!sh->default_miss_action) {
9167                         rte_flow_error_set(error, ENOMEM,
9168                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9169                                                 NULL,
9170                                                 "default miss action was not "
9171                                                 "created");
9172                         goto error;
9173                 }
9174                 sample_dv_actions[resource->sample_act.actions_num++] =
9175                                                 sh->default_miss_action;
9176         }
9177         /* Create a DR sample action */
9178         sampler_attr.sample_ratio = cache_resource->ratio;
9179         sampler_attr.default_next_table = tbl->obj;
9180         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
9181         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
9182                                                         &sample_dv_actions[0];
9183         sampler_attr.action = cache_resource->set_action;
9184         if (mlx5_os_flow_dr_create_flow_action_sampler
9185                         (&sampler_attr, &cache_resource->verbs_action)) {
9186                 rte_flow_error_set(error, ENOMEM,
9187                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9188                                         NULL, "cannot create sample action");
9189                 goto error;
9190         }
9191         cache_resource->idx = idx;
9192         cache_resource->dev = dev;
9193         return &cache_resource->entry;
9194 error:
9195         if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
9196                 flow_dv_sample_sub_actions_release(dev,
9197                                                    &cache_resource->sample_idx);
9198         if (cache_resource->normal_path_tbl)
9199                 flow_dv_tbl_resource_release(MLX5_SH(dev),
9200                                 cache_resource->normal_path_tbl);
9201         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
9202         return NULL;
9204 }
9205
9206 /**
9207  * Find existing sample resource or create and register a new one.
9208  *
9209  * @param[in, out] dev
9210  *   Pointer to rte_eth_dev structure.
9211  * @param[in] resource
9212  *   Pointer to sample resource.
9213  * @param[in, out] dev_flow
9214  *   Pointer to the dev_flow.
9215  * @param[out] error
9216  *   Pointer to error structure.
9217  *
9218  * @return
9219  *   0 on success, a negative errno value otherwise and rte_errno is set.
9220  */
9221 static int
9222 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
9223                          struct mlx5_flow_dv_sample_resource *resource,
9224                          struct mlx5_flow *dev_flow,
9225                          struct rte_flow_error *error)
9226 {
9227         struct mlx5_flow_dv_sample_resource *cache_resource;
9228         struct mlx5_cache_entry *entry;
9229         struct mlx5_priv *priv = dev->data->dev_private;
9230         struct mlx5_flow_cb_ctx ctx = {
9231                 .dev = dev,
9232                 .error = error,
9233                 .data = resource,
9234         };
9235
9236         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
9237         if (!entry)
9238                 return -rte_errno;
9239         cache_resource = container_of(entry, typeof(*cache_resource), entry);
9240         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
9241         dev_flow->dv.sample_res = cache_resource;
9242         return 0;
9243 }
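
     /*
      * Hedged sketch of the registration flow (illustrative; "dev",
      * "dev_flow", "error" and a populated "res" are assumed): translate
      * fills a local sample resource and registers it; when an equal entry
      * is already cached, it is reused and the match callback releases the
      * sub-actions prepared for the new copy.
      *
      *     if (flow_dv_sample_resource_register(dev, &res, dev_flow,
      *                                          error))
      *         return -rte_errno;
      *     (dev_flow->dv.sample_res->verbs_action is the DR sample action.)
      */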
9244
9245 int
9246 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
9247                             struct mlx5_cache_entry *entry, void *cb_ctx)
9248 {
9249         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9250         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
9251         struct rte_eth_dev *dev = ctx->dev;
9252         struct mlx5_flow_dv_dest_array_resource *cache_resource =
9253                         container_of(entry, typeof(*cache_resource), entry);
9254         uint32_t idx = 0;
9255
9256         if (resource->num_of_dest == cache_resource->num_of_dest &&
9257             resource->ft_type == cache_resource->ft_type &&
9258             !memcmp((void *)cache_resource->sample_act,
9259                     (void *)resource->sample_act,
9260                    (resource->num_of_dest *
9261                    sizeof(struct mlx5_flow_sub_actions_list)))) {
9262                 /*
9263                  * A matching entry already exists: release the
9264                  * sub-action references prepared for the new resource.
9265                  */
9266                 for (idx = 0; idx < resource->num_of_dest; idx++)
9267                         flow_dv_sample_sub_actions_release(dev,
9268                                         &resource->sample_idx[idx]);
9269                 return 0;
9270         }
9271         return 1;
9272 }
9273
9274 struct mlx5_cache_entry *
9275 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
9276                          struct mlx5_cache_entry *entry __rte_unused,
9277                          void *cb_ctx)
9278 {
9279         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9280         struct rte_eth_dev *dev = ctx->dev;
9281         struct mlx5_flow_dv_dest_array_resource *cache_resource;
9282         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
9283         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
9284         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
9285         struct mlx5_priv *priv = dev->data->dev_private;
9286         struct mlx5_dev_ctx_shared *sh = priv->sh;
9287         struct mlx5_flow_sub_actions_list *sample_act;
9288         struct mlx5dv_dr_domain *domain;
9289         uint32_t idx = 0, res_idx = 0;
9290         struct rte_flow_error *error = ctx->error;
9291         uint64_t action_flags;
9292         int ret;
9293
9294         /* Register new destination array resource. */
9295         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
9296                                             &res_idx);
9297         if (!cache_resource) {
9298                 rte_flow_error_set(error, ENOMEM,
9299                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9300                                           NULL,
9301                                           "cannot allocate resource memory");
9302                 return NULL;
9303         }
9304         *cache_resource = *resource;
9305         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
9306                 domain = sh->fdb_domain;
9307         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
9308                 domain = sh->rx_domain;
9309         else
9310                 domain = sh->tx_domain;
9311         for (idx = 0; idx < resource->num_of_dest; idx++) {
9312                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
9313                                  mlx5_malloc(MLX5_MEM_ZERO,
9314                                  sizeof(struct mlx5dv_dr_action_dest_attr),
9315                                  0, SOCKET_ID_ANY);
9316                 if (!dest_attr[idx]) {
9317                         rte_flow_error_set(error, ENOMEM,
9318                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9319                                            NULL,
9320                                            "cannot allocate resource memory");
9321                         goto error;
9322                 }
9323                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
9324                 sample_act = &resource->sample_act[idx];
9325                 action_flags = sample_act->action_flags;
9326                 switch (action_flags) {
9327                 case MLX5_FLOW_ACTION_QUEUE:
9328                         dest_attr[idx]->dest = sample_act->dr_queue_action;
9329                         break;
9330                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
9331                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
9332                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
9333                         dest_attr[idx]->dest_reformat->reformat =
9334                                         sample_act->dr_encap_action;
9335                         dest_attr[idx]->dest_reformat->dest =
9336                                         sample_act->dr_port_id_action;
9337                         break;
9338                 case MLX5_FLOW_ACTION_PORT_ID:
9339                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
9340                         break;
9341                 case MLX5_FLOW_ACTION_JUMP:
9342                         dest_attr[idx]->dest = sample_act->dr_jump_action;
9343                         break;
9344                 default:
9345                         rte_flow_error_set(error, EINVAL,
9346                                            RTE_FLOW_ERROR_TYPE_ACTION,
9347                                            NULL,
9348                                            "unsupported actions type");
9349                         goto error;
9350                 }
9351         }
9352         /* Create a dest array action. */
9353         ret = mlx5_os_flow_dr_create_flow_action_dest_array
9354                                                 (domain,
9355                                                  cache_resource->num_of_dest,
9356                                                  dest_attr,
9357                                                  &cache_resource->action);
9358         if (ret) {
9359                 rte_flow_error_set(error, ENOMEM,
9360                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9361                                    NULL,
9362                                    "cannot create destination array action");
9363                 goto error;
9364         }
9365         cache_resource->idx = res_idx;
9366         cache_resource->dev = dev;
9367         for (idx = 0; idx < resource->num_of_dest; idx++)
9368                 mlx5_free(dest_attr[idx]);
9369         return &cache_resource->entry;
9370 error:
9371         for (idx = 0; idx < resource->num_of_dest; idx++) {
9372                 struct mlx5_flow_sub_actions_idx *act_res =
9373                                         &cache_resource->sample_idx[idx];
9374                 if (act_res->rix_hrxq &&
9375                     !mlx5_hrxq_release(dev,
9376                                 act_res->rix_hrxq))
9377                         act_res->rix_hrxq = 0;
9378                 if (act_res->rix_encap_decap &&
9379                         !flow_dv_encap_decap_resource_release(dev,
9380                                 act_res->rix_encap_decap))
9381                         act_res->rix_encap_decap = 0;
9382                 if (act_res->rix_port_id_action &&
9383                         !flow_dv_port_id_action_resource_release(dev,
9384                                 act_res->rix_port_id_action))
9385                         act_res->rix_port_id_action = 0;
9386                 if (act_res->rix_jump &&
9387                         !flow_dv_jump_tbl_resource_release(dev,
9388                                 act_res->rix_jump))
9389                         act_res->rix_jump = 0;
9390                 if (dest_attr[idx])
9391                         mlx5_free(dest_attr[idx]);
9392         }
9393
9394         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
9395         return NULL;
9396 }
9397
9398 /**
9399  * Find existing destination array resource or create and register a new one.
9400  *
9401  * @param[in, out] dev
9402  *   Pointer to rte_eth_dev structure.
9403  * @param[in] resource
9404  *   Pointer to destination array resource.
9405  * @param[in, out] dev_flow
9406  *   Pointer to the dev_flow.
9407  * @param[out] error
9408  *   Pointer to error structure.
9409  *
9410  * @return
9411  *   0 on success, a negative errno value otherwise and rte_errno is set.
9412  */
9413 static int
9414 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
9415                          struct mlx5_flow_dv_dest_array_resource *resource,
9416                          struct mlx5_flow *dev_flow,
9417                          struct rte_flow_error *error)
9418 {
9419         struct mlx5_flow_dv_dest_array_resource *cache_resource;
9420         struct mlx5_priv *priv = dev->data->dev_private;
9421         struct mlx5_cache_entry *entry;
9422         struct mlx5_flow_cb_ctx ctx = {
9423                 .dev = dev,
9424                 .error = error,
9425                 .data = resource,
9426         };
9427
9428         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
9429         if (!entry)
9430                 return -rte_errno;
9431         cache_resource = container_of(entry, typeof(*cache_resource), entry);
9432         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
9433         dev_flow->dv.dest_array_res = cache_resource;
9434         return 0;
9435 }
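
     /*
      * Sketch (illustrative) of registering a two-destination mirror
      * array, assuming "dev", "dev_flow", "error" and a filled "mdest_res"
      * with num_of_dest == 2 are in scope:
      *
      *     if (flow_dv_dest_array_resource_register(dev, &mdest_res,
      *                                              dev_flow, error))
      *         return -rte_errno;
      *
      * dev_flow->dv.dest_array_res->action then replaces the individual
      * fate actions in the final DR rule.
      */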
9436
9437 /**
9438  * Convert Sample action to DV specification.
9439  *
9440  * @param[in] dev
9441  *   Pointer to rte_eth_dev structure.
9442  * @param[in] action
9443  *   Pointer to sample action structure.
9444  * @param[in, out] dev_flow
9445  *   Pointer to the mlx5_flow.
9446  * @param[in] attr
9447  *   Pointer to the flow attributes.
9448  * @param[in, out] num_of_dest
9449  *   Pointer to the num of destination.
9450  * @param[in, out] sample_actions
9451  *   Pointer to sample actions list.
9452  * @param[in, out] res
9453  *   Pointer to sample resource.
9454  * @param[out] error
9455  *   Pointer to the error structure.
9456  *
9457  * @return
9458  *   0 on success, a negative errno value otherwise and rte_errno is set.
9459  */
9460 static int
9461 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
9462                                 const struct rte_flow_action_sample *action,
9463                                 struct mlx5_flow *dev_flow,
9464                                 const struct rte_flow_attr *attr,
9465                                 uint32_t *num_of_dest,
9466                                 void **sample_actions,
9467                                 struct mlx5_flow_dv_sample_resource *res,
9468                                 struct rte_flow_error *error)
9469 {
9470         struct mlx5_priv *priv = dev->data->dev_private;
9471         const struct rte_flow_action *sub_actions;
9472         struct mlx5_flow_sub_actions_list *sample_act;
9473         struct mlx5_flow_sub_actions_idx *sample_idx;
9474         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9475         struct rte_flow *flow = dev_flow->flow;
9476         struct mlx5_flow_rss_desc *rss_desc;
9477         uint64_t action_flags = 0;
9478
9479         MLX5_ASSERT(wks);
9480         rss_desc = &wks->rss_desc;
9481         sample_act = &res->sample_act;
9482         sample_idx = &res->sample_idx;
9483         res->ratio = action->ratio;
9484         sub_actions = action->actions;
9485         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
9486                 int type = sub_actions->type;
9487                 uint32_t pre_rix = 0;
9488                 void *pre_r;
9489                 switch (type) {
9490                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9491                 {
9492                         const struct rte_flow_action_queue *queue;
9493                         struct mlx5_hrxq *hrxq;
9494                         uint32_t hrxq_idx;
9495
9496                         queue = sub_actions->conf;
9497                         rss_desc->queue_num = 1;
9498                         rss_desc->queue[0] = queue->index;
9499                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9500                                                     rss_desc, &hrxq_idx);
9501                         if (!hrxq)
9502                                 return rte_flow_error_set
9503                                         (error, rte_errno,
9504                                          RTE_FLOW_ERROR_TYPE_ACTION,
9505                                          NULL,
9506                                          "cannot create fate queue");
9507                         sample_act->dr_queue_action = hrxq->action;
9508                         sample_idx->rix_hrxq = hrxq_idx;
9509                         sample_actions[sample_act->actions_num++] =
9510                                                 hrxq->action;
9511                         (*num_of_dest)++;
9512                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9513                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9514                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9515                         dev_flow->handle->fate_action =
9516                                         MLX5_FLOW_FATE_QUEUE;
9517                         break;
9518                 }
9519                 case RTE_FLOW_ACTION_TYPE_RSS:
9520                 {
9521                         struct mlx5_hrxq *hrxq;
9522                         uint32_t hrxq_idx;
9523                         const struct rte_flow_action_rss *rss;
9524                         const uint8_t *rss_key;
9525
9526                         rss = sub_actions->conf;
9527                         memcpy(rss_desc->queue, rss->queue,
9528                                rss->queue_num * sizeof(uint16_t));
9529                         rss_desc->queue_num = rss->queue_num;
9530                         /* NULL RSS key indicates default RSS key. */
9531                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9532                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9533                         /*
9534                          * rss->level and rss->types should be set in advance
9535                          * when expanding items for RSS.
9536                          */
9537                         flow_dv_hashfields_set(dev_flow, rss_desc);
9538                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9539                                                     rss_desc, &hrxq_idx);
9540                         if (!hrxq)
9541                                 return rte_flow_error_set
9542                                         (error, rte_errno,
9543                                          RTE_FLOW_ERROR_TYPE_ACTION,
9544                                          NULL,
9545                                          "cannot create fate queue");
9546                         sample_act->dr_queue_action = hrxq->action;
9547                         sample_idx->rix_hrxq = hrxq_idx;
9548                         sample_actions[sample_act->actions_num++] =
9549                                                 hrxq->action;
9550                         (*num_of_dest)++;
9551                         action_flags |= MLX5_FLOW_ACTION_RSS;
9552                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9553                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9554                         dev_flow->handle->fate_action =
9555                                         MLX5_FLOW_FATE_QUEUE;
9556                         break;
9557                 }
9558                 case RTE_FLOW_ACTION_TYPE_MARK:
9559                 {
9560                         uint32_t tag_be = mlx5_flow_mark_set
9561                                 (((const struct rte_flow_action_mark *)
9562                                 (sub_actions->conf))->id);
9563
9564                         dev_flow->handle->mark = 1;
9565                         pre_rix = dev_flow->handle->dvh.rix_tag;
9566                         /* Save the mark resource before sample */
9567                         pre_r = dev_flow->dv.tag_resource;
9568                         if (flow_dv_tag_resource_register(dev, tag_be,
9569                                                   dev_flow, error))
9570                                 return -rte_errno;
9571                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9572                         sample_act->dr_tag_action =
9573                                 dev_flow->dv.tag_resource->action;
9574                         sample_idx->rix_tag =
9575                                 dev_flow->handle->dvh.rix_tag;
9576                         sample_actions[sample_act->actions_num++] =
9577                                                 sample_act->dr_tag_action;
9578                         /* Recover the mark resource after sample */
9579                         dev_flow->dv.tag_resource = pre_r;
9580                         dev_flow->handle->dvh.rix_tag = pre_rix;
9581                         action_flags |= MLX5_FLOW_ACTION_MARK;
9582                         break;
9583                 }
9584                 case RTE_FLOW_ACTION_TYPE_COUNT:
9585                 {
9586                         if (!flow->counter) {
9587                                 flow->counter =
9588                                         flow_dv_translate_create_counter(dev,
9589                                                 dev_flow, sub_actions->conf,
9590                                                 0);
9591                                 if (!flow->counter)
9592                                         return rte_flow_error_set
9593                                                 (error, rte_errno,
9594                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9595                                                 NULL,
9596                                                 "cannot create counter"
9597                                                 " object.");
9598                         }
9599                         sample_act->dr_cnt_action =
9600                                   (flow_dv_counter_get_by_idx(dev,
9601                                   flow->counter, NULL))->action;
9602                         sample_actions[sample_act->actions_num++] =
9603                                                 sample_act->dr_cnt_action;
9604                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9605                         break;
9606                 }
9607                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9608                 {
9609                         struct mlx5_flow_dv_port_id_action_resource
9610                                         port_id_resource;
9611                         uint32_t port_id = 0;
9612
9613                         memset(&port_id_resource, 0, sizeof(port_id_resource));
9614                         /* Save the port id resource before sample */
9615                         pre_rix = dev_flow->handle->rix_port_id_action;
9616                         pre_r = dev_flow->dv.port_id_action;
9617                         if (flow_dv_translate_action_port_id(dev, sub_actions,
9618                                                              &port_id, error))
9619                                 return -rte_errno;
9620                         port_id_resource.port_id = port_id;
9621                         if (flow_dv_port_id_action_resource_register
9622                             (dev, &port_id_resource, dev_flow, error))
9623                                 return -rte_errno;
9624                         sample_act->dr_port_id_action =
9625                                 dev_flow->dv.port_id_action->action;
9626                         sample_idx->rix_port_id_action =
9627                                 dev_flow->handle->rix_port_id_action;
9628                         sample_actions[sample_act->actions_num++] =
9629                                                 sample_act->dr_port_id_action;
9630                         /* Recover the port id resource after sample */
9631                         dev_flow->dv.port_id_action = pre_r;
9632                         dev_flow->handle->rix_port_id_action = pre_rix;
9633                         (*num_of_dest)++;
9634                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9635                         break;
9636                 }
9637                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9638                         /* Save the encap resource before sample */
9639                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9640                         pre_r = dev_flow->dv.encap_decap;
9641                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
9642                                                            dev_flow,
9643                                                            attr->transfer,
9644                                                            error))
9645                                 return -rte_errno;
9646                         sample_act->dr_encap_action =
9647                                 dev_flow->dv.encap_decap->action;
9648                         sample_idx->rix_encap_decap =
9649                                 dev_flow->handle->dvh.rix_encap_decap;
9650                         sample_actions[sample_act->actions_num++] =
9651                                                 sample_act->dr_encap_action;
9652                         /* Recover the encap resource after sample */
9653                         dev_flow->dv.encap_decap = pre_r;
9654                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9655                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9656                         break;
9657                 default:
9658                         return rte_flow_error_set(error, EINVAL,
9659                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9660                                 NULL,
9661                                 "unsupported action for sampler");
9662                 }
9663         }
9664         sample_act->action_flags = action_flags;
9665         res->ft_id = dev_flow->dv.group;
9666         if (attr->transfer) {
9667                 union {
9668                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9669                         uint64_t set_action;
9670                 } action_ctx = { .set_action = 0 };
9671
9672                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9673                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9674                          MLX5_MODIFICATION_TYPE_SET);
9675                 MLX5_SET(set_action_in, action_ctx.action_in, field,
9676                          MLX5_MODI_META_REG_C_0);
9677                 MLX5_SET(set_action_in, action_ctx.action_in, data,
9678                          priv->vport_meta_tag);
9679                 res->set_action = action_ctx.set_action;
9680         } else if (attr->ingress) {
9681                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9682         } else {
9683                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
9684         }
9685         return 0;
9686 }
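/*
 * A minimal usage sketch (non-normative) of the application-side action
 * list the helper above translates. Field names follow rte_flow.h; the
 * peer port id and the sub-action list are illustrative placeholders.
 *
 *	struct rte_flow_action_port_id mirror_port = { .id = 1 };
 *	struct rte_flow_action mirror_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &mirror_port },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_sample sample_conf = {
 *		.ratio = 1,
 *		.actions = mirror_actions,
 *	};
 *
 * A ratio of 1 mirrors every packet. On transfer rules the helper also
 * emits the REG_C_0 set-action built above to preserve the vport metadata.
 */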
9687
9688 /**
9689  * Convert Sample action to DV specification.
9690  *
9691  * @param[in] dev
9692  *   Pointer to rte_eth_dev structure.
9693  * @param[in, out] dev_flow
9694  *   Pointer to the mlx5_flow.
9695  * @param[in] num_of_dest
9696  *   The number of destinations.
9697  * @param[in, out] res
9698  *   Pointer to sample resource.
9699  * @param[in, out] mdest_res
9700  *   Pointer to destination array resource.
9701  * @param[in] sample_actions
9702  *   Pointer to sample path actions list.
9703  * @param[in] action_flags
9704  *   Holds the actions detected until now.
9705  * @param[out] error
9706  *   Pointer to the error structure.
9707  *
9708  * @return
9709  *   0 on success, a negative errno value otherwise and rte_errno is set.
9710  */
9711 static int
9712 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9713                              struct mlx5_flow *dev_flow,
9714                              uint32_t num_of_dest,
9715                              struct mlx5_flow_dv_sample_resource *res,
9716                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
9717                              void **sample_actions,
9718                              uint64_t action_flags,
9719                              struct rte_flow_error *error)
9720 {
9721         /* Update the normal path action resource in the array's last index. */
9722         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9723         struct mlx5_flow_sub_actions_list *sample_act =
9724                                         &mdest_res->sample_act[dest_index];
9725         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9726         struct mlx5_flow_rss_desc *rss_desc;
9727         uint32_t normal_idx = 0;
9728         struct mlx5_hrxq *hrxq;
9729         uint32_t hrxq_idx;
9730
9731         MLX5_ASSERT(wks);
9732         rss_desc = &wks->rss_desc;
9733         if (num_of_dest > 1) {
9734                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9735                         /* Handle QP action for mirroring */
9736                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9737                                                     rss_desc, &hrxq_idx);
9738                         if (!hrxq)
9739                                 return rte_flow_error_set
9740                                      (error, rte_errno,
9741                                       RTE_FLOW_ERROR_TYPE_ACTION,
9742                                       NULL,
9743                                       "cannot create rx queue");
9744                         normal_idx++;
9745                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9746                         sample_act->dr_queue_action = hrxq->action;
9747                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9748                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9749                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9750                 }
9751                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9752                         normal_idx++;
9753                         mdest_res->sample_idx[dest_index].rix_encap_decap =
9754                                 dev_flow->handle->dvh.rix_encap_decap;
9755                         sample_act->dr_encap_action =
9756                                 dev_flow->dv.encap_decap->action;
9757                 }
9758                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9759                         normal_idx++;
9760                         mdest_res->sample_idx[dest_index].rix_port_id_action =
9761                                 dev_flow->handle->rix_port_id_action;
9762                         sample_act->dr_port_id_action =
9763                                 dev_flow->dv.port_id_action->action;
9764                 }
9765                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
9766                         normal_idx++;
9767                         mdest_res->sample_idx[dest_index].rix_jump =
9768                                 dev_flow->handle->rix_jump;
9769                         sample_act->dr_jump_action =
9770                                 dev_flow->dv.jump->action;
9771                         dev_flow->handle->rix_jump = 0;
9772                 }
9773                 sample_act->actions_num = normal_idx;
9774                 /* Update the sample action resource in the array's first index. */
9775                 mdest_res->ft_type = res->ft_type;
9776                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9777                                 sizeof(struct mlx5_flow_sub_actions_idx));
9778                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9779                                 sizeof(struct mlx5_flow_sub_actions_list));
9780                 mdest_res->num_of_dest = num_of_dest;
9781                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
9782                                                          dev_flow, error))
9783                         return rte_flow_error_set(error, EINVAL,
9784                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9785                                                   NULL, "can't create sample "
9786                                                   "action");
9787         } else {
9788                 res->sub_actions = sample_actions;
9789                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
9790                         return rte_flow_error_set(error, EINVAL,
9791                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9792                                                   NULL,
9793                                                   "can't create sample action");
9794         }
9795         return 0;
9796 }
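/*
 * A worked example (non-normative) of reaching the num_of_dest > 1 branch
 * above. An application action list such as
 *
 *	SAMPLE(ratio = 1, actions = [PORT_ID, END]), QUEUE, END
 *
 * counts two destinations: the sampler path (PORT_ID) and the normal path
 * (QUEUE). Both sub-action lists are then packed into mdest_res and
 * registered as a destination array; with a single destination only the
 * plain sample resource is registered.
 */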
9797
9798 /**
9799  * Remove an ASO age action from age actions list.
9800  *
9801  * @param[in] dev
9802  *   Pointer to the Ethernet device structure.
9803  * @param[in] age
9804  *   Pointer to the aso age action handler.
9805  */
9806 static void
9807 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
9808                                 struct mlx5_aso_age_action *age)
9809 {
9810         struct mlx5_age_info *age_info;
9811         struct mlx5_age_param *age_param = &age->age_params;
9812         struct mlx5_priv *priv = dev->data->dev_private;
9813         uint16_t expected = AGE_CANDIDATE;
9814
9815         age_info = GET_PORT_AGE_INFO(priv);
9816         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
9817                                          AGE_FREE, false, __ATOMIC_RELAXED,
9818                                          __ATOMIC_RELAXED)) {
9819                 /*
9820                  * The lock is needed even if the age timed out:
9821                  * the age action may still be in process.
9822                  */
9823                 rte_spinlock_lock(&age_info->aged_sl);
9824                 LIST_REMOVE(age, next);
9825                 rte_spinlock_unlock(&age_info->aged_sl);
9826                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
9827         }
9828 }
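/*
 * A short sketch of the __atomic CAS idiom used above (GCC/Clang builtin):
 * on failure the builtin writes the observed value back into `expected`,
 * which is why a fresh local initialized to AGE_CANDIDATE is used per call.
 *
 *	uint16_t expected = AGE_CANDIDATE;
 *	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
 *					 AGE_FREE, false, __ATOMIC_RELAXED,
 *					 __ATOMIC_RELAXED)) {
 *		// CAS failed: the action already left the candidate state
 *		// (e.g. timed out), so unlink it under aged_sl as above.
 *	}
 */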
9829
9830 /**
9831  * Release an ASO age action.
9832  *
9833  * @param[in] dev
9834  *   Pointer to the Ethernet device structure.
9835  * @param[in] age_idx
9836  *   Index of ASO age action to release.
9840  *
9841  * @return
9842  *   0 when the age action was released, otherwise the number of remaining
9843  *   references.
9843  */
9844 static int
9845 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
9846 {
9847         struct mlx5_priv *priv = dev->data->dev_private;
9848         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9849         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
9850         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
9851
9852         if (!ret) {
9853                 flow_dv_aso_age_remove_from_age(dev, age);
9854                 rte_spinlock_lock(&mng->free_sl);
9855                 LIST_INSERT_HEAD(&mng->free, age, next);
9856                 rte_spinlock_unlock(&mng->free_sl);
9857         }
9858         return ret;
9859 }
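/*
 * Caller-side sketch (non-normative) of the release contract documented
 * above; `age_idx` is an index previously returned by
 * flow_dv_aso_age_alloc().
 *
 *	if (flow_dv_aso_age_release(dev, age_idx) == 0) {
 *		// Last reference dropped: the action was removed from the
 *		// aged list and pushed back onto mng->free.
 *	}
 */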
9860
9861 /**
9862  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
9863  *
9864  * @param[in] dev
9865  *   Pointer to the Ethernet device structure.
9866  *
9867  * @return
9868  *   0 on success, otherwise negative errno value and rte_errno is set.
9869  */
9870 static int
9871 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
9872 {
9873         struct mlx5_priv *priv = dev->data->dev_private;
9874         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9875         void *old_pools = mng->pools;
9876         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
9877         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
9878         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
9879
9880         if (!pools) {
9881                 rte_errno = ENOMEM;
9882                 return -ENOMEM;
9883         }
9884         if (old_pools) {
9885                 memcpy(pools, old_pools,
9886                        mng->n * sizeof(struct mlx5_aso_age_pool *));
9887                 mlx5_free(old_pools);
9888         } else {
9889                 /* First ASO flow hit allocation - starting ASO data-path. */
9890                 int ret = mlx5_aso_queue_start(priv->sh);
9891
9892                 if (ret) {
9893                         mlx5_free(pools);
9894                         return ret;
9895                 }
9896         }
9897         mng->n = resize;
9898         mng->pools = pools;
9899         return 0;
9900 }
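/*
 * The resize above grows the pointer array by a fixed chunk
 * (MLX5_CNT_CONTAINER_RESIZE) rather than geometrically. A generic sketch
 * of the same pattern with hypothetical names; note that sizeof(*new_arr)
 * keeps the element type and the copy size in sync:
 *
 *	new_arr = mlx5_malloc(MLX5_MEM_ZERO, new_n * sizeof(*new_arr),
 *			      0, SOCKET_ID_ANY);
 *	if (!new_arr)
 *		return -ENOMEM;
 *	if (old_arr) {
 *		memcpy(new_arr, old_arr, old_n * sizeof(*new_arr));
 *		mlx5_free(old_arr);
 *	}
 */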
9901
9902 /**
9903  * Create and initialize a new ASO aging pool.
9904  *
9905  * @param[in] dev
9906  *   Pointer to the Ethernet device structure.
9907  * @param[out] age_free
9908  *   Where to put the pointer of a new age action.
9909  *
9910  * @return
9911  *   Pointer to the age actions pool, with @p age_free set, on success;
9912  *   NULL otherwise and rte_errno is set.
9913  */
9914 static struct mlx5_aso_age_pool *
9915 flow_dv_age_pool_create(struct rte_eth_dev *dev,
9916                         struct mlx5_aso_age_action **age_free)
9917 {
9918         struct mlx5_priv *priv = dev->data->dev_private;
9919         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9920         struct mlx5_aso_age_pool *pool = NULL;
9921         struct mlx5_devx_obj *obj = NULL;
9922         uint32_t i;
9923
9924         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
9925                                                     priv->sh->pdn);
9926         if (!obj) {
9927                 rte_errno = ENODATA;
9928                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
9929                 return NULL;
9930         }
9931         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
9932         if (!pool) {
9933                 claim_zero(mlx5_devx_cmd_destroy(obj));
9934                 rte_errno = ENOMEM;
9935                 return NULL;
9936         }
9937         pool->flow_hit_aso_obj = obj;
9938         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
9939         rte_spinlock_lock(&mng->resize_sl);
9940         pool->index = mng->next;
9941         /* Resize pools array if there is no room for the new pool in it. */
9942         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
9943                 claim_zero(mlx5_devx_cmd_destroy(obj));
9944                 mlx5_free(pool);
9945                 rte_spinlock_unlock(&mng->resize_sl);
9946                 return NULL;
9947         }
9948         mng->pools[pool->index] = pool;
9949         mng->next++;
9950         rte_spinlock_unlock(&mng->resize_sl);
9951         /* Assign the first action in the new pool; the rest go to the free list. */
9952         *age_free = &pool->actions[0];
9953         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
9954                 pool->actions[i].offset = i;
9955                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
9956         }
9957         return pool;
9958 }
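/*
 * Layout sketch of the pool created above: slot 0 is handed straight to
 * the caller via @p age_free, the remaining slots are pushed onto the
 * manager's free list for later allocations.
 *
 *	pool->actions[0]                     -> *age_free (caller)
 *	pool->actions[1..ACTIONS_PER_POOL-1] -> mng->free
 */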
9959
9960 /**
9961  * Allocate an ASO aging bit.
9962  *
9963  * @param[in] dev
9964  *   Pointer to the Ethernet device structure.
9965  * @param[out] error
9966  *   Pointer to the error structure.
9967  *
9968  * @return
9969  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
9970  */
9971 static uint32_t
9972 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
9973 {
9974         struct mlx5_priv *priv = dev->data->dev_private;
9975         const struct mlx5_aso_age_pool *pool;
9976         struct mlx5_aso_age_action *age_free = NULL;
9977         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9978
9979         MLX5_ASSERT(mng);
9980         /* Try to get the next free age action bit. */
9981         rte_spinlock_lock(&mng->free_sl);
9982         age_free = LIST_FIRST(&mng->free);
9983         if (age_free) {
9984                 LIST_REMOVE(age_free, next);
9985         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
9986                 rte_spinlock_unlock(&mng->free_sl);
9987                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
9988                                    NULL, "failed to create ASO age pool");
9989                 return 0; /* 0 is an error. */
9990         }
9991         rte_spinlock_unlock(&mng->free_sl);
9992         pool = container_of
9993           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
9994                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
9995                                                                        actions);
9996         if (!age_free->dr_action) {
9997                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
9998                                                  error);
9999
10000                 if (reg_c < 0) {
10001                         rte_flow_error_set(error, rte_errno,
10002                                            RTE_FLOW_ERROR_TYPE_ACTION,
10003                                            NULL, "failed to get reg_c "
10004                                            "for ASO flow hit");
10005                         return 0; /* 0 is an error. */
10006                 }
10007 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
10008                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
10009                                 (priv->sh->rx_domain,
10010                                  pool->flow_hit_aso_obj->obj, age_free->offset,
10011                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
10012                                  (reg_c - REG_C_0));
10013 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
10014                 if (!age_free->dr_action) {
10015                         rte_errno = errno;
10016                         rte_spinlock_lock(&mng->free_sl);
10017                         LIST_INSERT_HEAD(&mng->free, age_free, next);
10018                         rte_spinlock_unlock(&mng->free_sl);
10019                         rte_flow_error_set(error, rte_errno,
10020                                            RTE_FLOW_ERROR_TYPE_ACTION,
10021                                            NULL, "failed to create ASO "
10022                                            "flow hit action");
10023                         return 0; /* 0 is an error. */
10024                 }
10025         }
10026         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
10027         return pool->index | ((age_free->offset + 1) << 16);
10028 }
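/*
 * The return value above packs the pool index into the low 16 bits and
 * (offset + 1) into the upper bits, so that 0 can signal failure. A
 * decoding sketch with hypothetical helper names (the driver's own
 * decoder is flow_aso_age_get_by_idx()):
 *
 *	static inline uint16_t aso_age_pool_idx(uint32_t age_idx)
 *	{
 *		return age_idx & UINT16_MAX;
 *	}
 *
 *	static inline uint16_t aso_age_offset(uint32_t age_idx)
 *	{
 *		return (age_idx >> 16) - 1;
 *	}
 */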
10029
10030 /**
10031  * Create an age action using the ASO mechanism.
10032  *
10033  * @param[in] dev
10034  *   Pointer to rte_eth_dev structure.
10035  * @param[in] age
10036  *   Pointer to the aging action configuration.
10037  * @param[out] error
10038  *   Pointer to the error structure.
10039  *
10040  * @return
10041  *   Index to the created ASO age action on success, 0 otherwise.
10042  */
10043 static uint32_t
10044 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
10045                                  const struct rte_flow_action_age *age,
10046                                  struct rte_flow_error *error)
10047 {
10048         uint32_t age_idx = 0;
10049         struct mlx5_aso_age_action *aso_age;
10050
10051         age_idx = flow_dv_aso_age_alloc(dev, error);
10052         if (!age_idx)
10053                 return 0;
10054         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
10055         aso_age->age_params.context = age->context;
10056         aso_age->age_params.timeout = age->timeout;
10057         aso_age->age_params.port_id = dev->data->port_id;
10058         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
10059                          __ATOMIC_RELAXED);
10060         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
10061                          __ATOMIC_RELAXED);
10062         return age_idx;
10063 }
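/*
 * A minimal sketch (non-normative) of the application-side AGE action this
 * helper translates. Field names follow struct rte_flow_action_age in
 * rte_flow.h; the timeout value and context cookie are placeholders.
 *
 *	struct rte_flow_action_age age_conf = {
 *		.timeout = 10,		// seconds without a hit before aging
 *		.context = NULL,	// user cookie reported on aged query
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */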
10064
10065 /**
10066  * Fill the flow with DV spec, lock free
10067  * (mutex should be acquired by caller).
10068  *
10069  * @param[in] dev
10070  *   Pointer to rte_eth_dev structure.
10071  * @param[in, out] dev_flow
10072  *   Pointer to the sub flow.
10073  * @param[in] attr
10074  *   Pointer to the flow attributes.
10075  * @param[in] items
10076  *   Pointer to the list of items.
10077  * @param[in] actions
10078  *   Pointer to the list of actions.
10079  * @param[out] error
10080  *   Pointer to the error structure.
10081  *
10082  * @return
10083  *   0 on success, a negative errno value otherwise and rte_errno is set.
10084  */
10085 static int
10086 flow_dv_translate(struct rte_eth_dev *dev,
10087                   struct mlx5_flow *dev_flow,
10088                   const struct rte_flow_attr *attr,
10089                   const struct rte_flow_item items[],
10090                   const struct rte_flow_action actions[],
10091                   struct rte_flow_error *error)
10092 {
10093         struct mlx5_priv *priv = dev->data->dev_private;
10094         struct mlx5_dev_config *dev_conf = &priv->config;
10095         struct rte_flow *flow = dev_flow->flow;
10096         struct mlx5_flow_handle *handle = dev_flow->handle;
10097         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10098         struct mlx5_flow_rss_desc *rss_desc;
10099         uint64_t item_flags = 0;
10100         uint64_t last_item = 0;
10101         uint64_t action_flags = 0;
10102         struct mlx5_flow_dv_matcher matcher = {
10103                 .mask = {
10104                         .size = sizeof(matcher.mask.buf) -
10105                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
10106                 },
10107         };
10108         int actions_n = 0;
10109         bool actions_end = false;
10110         union {
10111                 struct mlx5_flow_dv_modify_hdr_resource res;
10112                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
10113                             sizeof(struct mlx5_modification_cmd) *
10114                             (MLX5_MAX_MODIFY_NUM + 1)];
10115         } mhdr_dummy;
10116         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
10117         const struct rte_flow_action_count *count = NULL;
10118         const struct rte_flow_action_age *age = NULL;
10119         union flow_dv_attr flow_attr = { .attr = 0 };
10120         uint32_t tag_be;
10121         union mlx5_flow_tbl_key tbl_key;
10122         uint32_t modify_action_position = UINT32_MAX;
10123         void *match_mask = matcher.mask.buf;
10124         void *match_value = dev_flow->dv.value.buf;
10125         uint8_t next_protocol = 0xff;
10126         struct rte_vlan_hdr vlan = { 0 };
10127         struct mlx5_flow_dv_dest_array_resource mdest_res;
10128         struct mlx5_flow_dv_sample_resource sample_res;
10129         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
10130         const struct rte_flow_action_sample *sample = NULL;
10131         struct mlx5_flow_sub_actions_list *sample_act;
10132         uint32_t sample_act_pos = UINT32_MAX;
10133         uint32_t num_of_dest = 0;
10134         int tmp_actions_n = 0;
10135         uint32_t table;
10136         int ret = 0;
10137         const struct mlx5_flow_tunnel *tunnel;
10138         struct flow_grp_info grp_info = {
10139                 .external = !!dev_flow->external,
10140                 .transfer = !!attr->transfer,
10141                 .fdb_def_rule = !!priv->fdb_def_rule,
10142                 .skip_scale = dev_flow->skip_scale &
10143                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
10144         };
10145
10146         if (!wks)
10147                 return rte_flow_error_set(error, ENOMEM,
10148                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10149                                           NULL,
10150                                           "failed to push flow workspace");
10151         rss_desc = &wks->rss_desc;
10152         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
10153         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
10154         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
10155                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10156         /* Update the normal path action resource in the array's last index. */
10157         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
10158         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
10159                  flow_items_to_tunnel(items) :
10160                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
10161                  flow_actions_to_tunnel(actions) :
10162                  dev_flow->tunnel;
10165         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
10166                                 (dev, tunnel, attr, items, actions);
10167         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
10168                                        &grp_info, error);
10169         if (ret)
10170                 return ret;
10171         dev_flow->dv.group = table;
10172         if (attr->transfer)
10173                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10174         /* Zero the number of actions: mhdr_res may come from a dirty stack. */
10175         mhdr_res->actions_num = 0;
10176         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
10177                 /*
10178                  * Do not add a decap action if the match rule drops the
10179                  * packet: HW rejects rules combining decap and drop.
10180                  *
10181                  * If the tunnel match rule was inserted before the matching
10182                  * tunnel set rule, the flow table used in the match rule
10183                  * must be registered. The current implementation handles
10184                  * that in flow_dv_match_register() at the function end.
10185                  */
10186                 bool add_decap = true;
10187                 const struct rte_flow_action *ptr = actions;
10188
10189                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
10190                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
10191                                 add_decap = false;
10192                                 break;
10193                         }
10194                 }
10195                 if (add_decap) {
10196                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
10197                                                            attr->transfer,
10198                                                            error))
10199                                 return -rte_errno;
10200                         dev_flow->dv.actions[actions_n++] =
10201                                         dev_flow->dv.encap_decap->action;
10202                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10203                 }
10204         }
10205         for (; !actions_end ; actions++) {
10206                 const struct rte_flow_action_queue *queue;
10207                 const struct rte_flow_action_rss *rss;
10208                 const struct rte_flow_action *action = actions;
10209                 const uint8_t *rss_key;
10210                 const struct rte_flow_action_meter *mtr;
10211                 struct mlx5_flow_tbl_resource *tbl;
10212                 struct mlx5_aso_age_action *age_act;
10213                 uint32_t port_id = 0;
10214                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
10215                 int action_type = actions->type;
10216                 const struct rte_flow_action *found_action = NULL;
10217                 struct mlx5_flow_meter *fm = NULL;
10218                 uint32_t jump_group = 0;
10219
10220                 if (!mlx5_flow_os_action_supported(action_type))
10221                         return rte_flow_error_set(error, ENOTSUP,
10222                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10223                                                   actions,
10224                                                   "action not supported");
10225                 switch (action_type) {
10226                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
10227                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
10228                         break;
10229                 case RTE_FLOW_ACTION_TYPE_VOID:
10230                         break;
10231                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
10232                         if (flow_dv_translate_action_port_id(dev, action,
10233                                                              &port_id, error))
10234                                 return -rte_errno;
10235                         port_id_resource.port_id = port_id;
10236                         MLX5_ASSERT(!handle->rix_port_id_action);
10237                         if (flow_dv_port_id_action_resource_register
10238                             (dev, &port_id_resource, dev_flow, error))
10239                                 return -rte_errno;
10240                         dev_flow->dv.actions[actions_n++] =
10241                                         dev_flow->dv.port_id_action->action;
10242                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10243                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
10244                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10245                         num_of_dest++;
10246                         break;
10247                 case RTE_FLOW_ACTION_TYPE_FLAG:
10248                         action_flags |= MLX5_FLOW_ACTION_FLAG;
10249                         dev_flow->handle->mark = 1;
10250                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
10251                                 struct rte_flow_action_mark mark = {
10252                                         .id = MLX5_FLOW_MARK_DEFAULT,
10253                                 };
10254
10255                                 if (flow_dv_convert_action_mark(dev, &mark,
10256                                                                 mhdr_res,
10257                                                                 error))
10258                                         return -rte_errno;
10259                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
10260                                 break;
10261                         }
10262                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
10263                         /*
10264                          * Only one FLAG or MARK is supported per device flow
10265                          * right now. So the pointer to the tag resource must be
10266                          * zero before the register process.
10267                          */
10268                         MLX5_ASSERT(!handle->dvh.rix_tag);
10269                         if (flow_dv_tag_resource_register(dev, tag_be,
10270                                                           dev_flow, error))
10271                                 return -rte_errno;
10272                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10273                         dev_flow->dv.actions[actions_n++] =
10274                                         dev_flow->dv.tag_resource->action;
10275                         break;
10276                 case RTE_FLOW_ACTION_TYPE_MARK:
10277                         action_flags |= MLX5_FLOW_ACTION_MARK;
10278                         dev_flow->handle->mark = 1;
10279                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
10280                                 const struct rte_flow_action_mark *mark =
10281                                         (const struct rte_flow_action_mark *)
10282                                                 actions->conf;
10283
10284                                 if (flow_dv_convert_action_mark(dev, mark,
10285                                                                 mhdr_res,
10286                                                                 error))
10287                                         return -rte_errno;
10288                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
10289                                 break;
10290                         }
10291                         /* Fall-through */
10292                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
10293                         /* Legacy (non-extensive) MARK action. */
10294                         tag_be = mlx5_flow_mark_set
10295                               (((const struct rte_flow_action_mark *)
10296                                (actions->conf))->id);
10297                         MLX5_ASSERT(!handle->dvh.rix_tag);
10298                         if (flow_dv_tag_resource_register(dev, tag_be,
10299                                                           dev_flow, error))
10300                                 return -rte_errno;
10301                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10302                         dev_flow->dv.actions[actions_n++] =
10303                                         dev_flow->dv.tag_resource->action;
10304                         break;
10305                 case RTE_FLOW_ACTION_TYPE_SET_META:
10306                         if (flow_dv_convert_action_set_meta
10307                                 (dev, mhdr_res, attr,
10308                                  (const struct rte_flow_action_set_meta *)
10309                                   actions->conf, error))
10310                                 return -rte_errno;
10311                         action_flags |= MLX5_FLOW_ACTION_SET_META;
10312                         break;
10313                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
10314                         if (flow_dv_convert_action_set_tag
10315                                 (dev, mhdr_res,
10316                                  (const struct rte_flow_action_set_tag *)
10317                                   actions->conf, error))
10318                                 return -rte_errno;
10319                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10320                         break;
10321                 case RTE_FLOW_ACTION_TYPE_DROP:
10322                         action_flags |= MLX5_FLOW_ACTION_DROP;
10323                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
10324                         break;
10325                 case RTE_FLOW_ACTION_TYPE_QUEUE:
10326                         queue = actions->conf;
10327                         rss_desc->queue_num = 1;
10328                         rss_desc->queue[0] = queue->index;
10329                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
10330                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
10331                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
10332                         num_of_dest++;
10333                         break;
10334                 case RTE_FLOW_ACTION_TYPE_RSS:
10335                         rss = actions->conf;
10336                         memcpy(rss_desc->queue, rss->queue,
10337                                rss->queue_num * sizeof(uint16_t));
10338                         rss_desc->queue_num = rss->queue_num;
10339                         /* NULL RSS key indicates default RSS key. */
10340                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
10341                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
10342                         /*
10343                          * rss->level and rss->types should be set in advance
10344                          * when expanding items for RSS.
10345                          */
10346                         action_flags |= MLX5_FLOW_ACTION_RSS;
10347                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
10348                                 MLX5_FLOW_FATE_SHARED_RSS :
10349                                 MLX5_FLOW_FATE_QUEUE;
10350                         break;
10351                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
10352                         flow->age = (uint32_t)(uintptr_t)(action->conf);
10353                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
10354                         __atomic_fetch_add(&age_act->refcnt, 1,
10355                                            __ATOMIC_RELAXED);
10356                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
10357                         action_flags |= MLX5_FLOW_ACTION_AGE;
10358                         break;
10359                 case RTE_FLOW_ACTION_TYPE_AGE:
10360                         if (priv->sh->flow_hit_aso_en && attr->group) {
10361                                 /*
10362                                  * Create one shared age action, to be used
10363                                  * by all sub-flows.
10364                                  */
10365                                 if (!flow->age) {
10366                                         flow->age =
10367                                                 flow_dv_translate_create_aso_age
10368                                                         (dev, action->conf,
10369                                                          error);
10370                                         if (!flow->age)
10371                                                 return rte_flow_error_set
10372                                                 (error, rte_errno,
10373                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10374                                                  NULL,
10375                                                  "can't create ASO age action");
10376                                 }
10377                                 dev_flow->dv.actions[actions_n++] =
10378                                           (flow_aso_age_get_by_idx
10379                                                 (dev, flow->age))->dr_action;
10380                                 action_flags |= MLX5_FLOW_ACTION_AGE;
10381                                 break;
10382                         }
10383                         /* Fall-through */
10384                 case RTE_FLOW_ACTION_TYPE_COUNT:
10385                         if (!dev_conf->devx) {
10386                                 return rte_flow_error_set
10387                                               (error, ENOTSUP,
10388                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10389                                                NULL,
10390                                                "count action not supported");
10391                         }
10392                         /* Save information first, will apply later. */
10393                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
10394                                 count = action->conf;
10395                         else
10396                                 age = action->conf;
10397                         action_flags |= MLX5_FLOW_ACTION_COUNT;
10398                         break;
10399                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
10400                         dev_flow->dv.actions[actions_n++] =
10401                                                 priv->sh->pop_vlan_action;
10402                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
10403                         break;
10404                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
10405                         if (!(action_flags &
10406                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
10407                                 flow_dev_get_vlan_info_from_items(items, &vlan);
10408                         vlan.eth_proto = rte_be_to_cpu_16
10409                              ((((const struct rte_flow_action_of_push_vlan *)
10410                                                    actions->conf)->ethertype));
10411                         found_action = mlx5_flow_find_action
10412                                         (actions + 1,
10413                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
10414                         if (found_action)
10415                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
10416                         found_action = mlx5_flow_find_action
10417                                         (actions + 1,
10418                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
10419                         if (found_action)
10420                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
10421                         if (flow_dv_create_action_push_vlan
10422                                             (dev, attr, &vlan, dev_flow, error))
10423                                 return -rte_errno;
10424                         dev_flow->dv.actions[actions_n++] =
10425                                         dev_flow->dv.push_vlan_res->action;
10426                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
10427                         break;
10428                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
10429                         /* The OF_PUSH_VLAN action already handled this one. */
10430                         MLX5_ASSERT(action_flags &
10431                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
10432                         break;
10433                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
10434                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
10435                                 break;
10436                         flow_dev_get_vlan_info_from_items(items, &vlan);
10437                         mlx5_update_vlan_vid_pcp(actions, &vlan);
10438                         /* If no VLAN push - this is a modify header action */
10439                         if (flow_dv_convert_action_modify_vlan_vid
10440                                                 (mhdr_res, actions, error))
10441                                 return -rte_errno;
10442                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
10443                         break;
10444                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
10445                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
10446                         if (flow_dv_create_action_l2_encap(dev, actions,
10447                                                            dev_flow,
10448                                                            attr->transfer,
10449                                                            error))
10450                                 return -rte_errno;
10451                         dev_flow->dv.actions[actions_n++] =
10452                                         dev_flow->dv.encap_decap->action;
10453                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10454                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
10455                                 sample_act->action_flags |=
10456                                                         MLX5_FLOW_ACTION_ENCAP;
10457                         break;
10458                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
10459                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
10460                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
10461                                                            attr->transfer,
10462                                                            error))
10463                                 return -rte_errno;
10464                         dev_flow->dv.actions[actions_n++] =
10465                                         dev_flow->dv.encap_decap->action;
10466                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10467                         break;
10468                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10469                         /* Handle encap with preceding decap. */
10470                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
10471                                 if (flow_dv_create_action_raw_encap
10472                                         (dev, actions, dev_flow, attr, error))
10473                                         return -rte_errno;
10474                                 dev_flow->dv.actions[actions_n++] =
10475                                         dev_flow->dv.encap_decap->action;
10476                         } else {
10477                                 /* Handle encap without preceding decap. */
10478                                 if (flow_dv_create_action_l2_encap
10479                                     (dev, actions, dev_flow, attr->transfer,
10480                                      error))
10481                                         return -rte_errno;
10482                                 dev_flow->dv.actions[actions_n++] =
10483                                         dev_flow->dv.encap_decap->action;
10484                         }
10485                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10486                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
10487                                 sample_act->action_flags |=
10488                                                         MLX5_FLOW_ACTION_ENCAP;
10489                         break;
10490                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
10491                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
10492                                 ;
10493                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
10494                                 if (flow_dv_create_action_l2_decap
10495                                     (dev, dev_flow, attr->transfer, error))
10496                                         return -rte_errno;
10497                                 dev_flow->dv.actions[actions_n++] =
10498                                         dev_flow->dv.encap_decap->action;
10499                         }
10500                         /* If decap is followed by encap, handle it at encap. */
10501                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10502                         break;
10503                 case RTE_FLOW_ACTION_TYPE_JUMP:
10504                         jump_group = ((const struct rte_flow_action_jump *)
10505                                                         action->conf)->group;
10506                         grp_info.std_tbl_fix = 0;
10507                         if (dev_flow->skip_scale &
10508                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
10509                                 grp_info.skip_scale = 1;
10510                         else
10511                                 grp_info.skip_scale = 0;
10512                         ret = mlx5_flow_group_to_table(dev, tunnel,
10513                                                        jump_group,
10514                                                        &table,
10515                                                        &grp_info, error);
10516                         if (ret)
10517                                 return ret;
10518                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
10519                                                        attr->transfer,
10520                                                        !!dev_flow->external,
10521                                                        tunnel, jump_group, 0,
10522                                                        error);
10523                         if (!tbl)
10524                                 return rte_flow_error_set
10525                                                 (error, errno,
10526                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10527                                                  NULL,
10528                                                  "cannot create jump action.");
10529                         if (flow_dv_jump_tbl_resource_register
10530                             (dev, tbl, dev_flow, error)) {
10531                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10532                                 return rte_flow_error_set
10533                                                 (error, errno,
10534                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10535                                                  NULL,
10536                                                  "cannot create jump action.");
10537                         }
10538                         dev_flow->dv.actions[actions_n++] =
10539                                         dev_flow->dv.jump->action;
10540                         action_flags |= MLX5_FLOW_ACTION_JUMP;
10541                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
10542                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
10543                         num_of_dest++;
10544                         break;
10545                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
10546                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
10547                         if (flow_dv_convert_action_modify_mac
10548                                         (mhdr_res, actions, error))
10549                                 return -rte_errno;
10550                         action_flags |= actions->type ==
10551                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
10552                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
10553                                         MLX5_FLOW_ACTION_SET_MAC_DST;
10554                         break;
10555                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
10556                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
10557                         if (flow_dv_convert_action_modify_ipv4
10558                                         (mhdr_res, actions, error))
10559                                 return -rte_errno;
10560                         action_flags |= actions->type ==
10561                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
10562                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
10563                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
10564                         break;
10565                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
10566                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
10567                         if (flow_dv_convert_action_modify_ipv6
10568                                         (mhdr_res, actions, error))
10569                                 return -rte_errno;
10570                         action_flags |= actions->type ==
10571                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
10572                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
10573                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
10574                         break;
10575                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
10576                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
10577                         if (flow_dv_convert_action_modify_tp
10578                                         (mhdr_res, actions, items,
10579                                          &flow_attr, dev_flow, !!(action_flags &
10580                                          MLX5_FLOW_ACTION_DECAP), error))
10581                                 return -rte_errno;
10582                         action_flags |= actions->type ==
10583                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
10584                                         MLX5_FLOW_ACTION_SET_TP_SRC :
10585                                         MLX5_FLOW_ACTION_SET_TP_DST;
10586                         break;
10587                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
10588                         if (flow_dv_convert_action_modify_dec_ttl
10589                                         (mhdr_res, items, &flow_attr, dev_flow,
10590                                          !!(action_flags &
10591                                          MLX5_FLOW_ACTION_DECAP), error))
10592                                 return -rte_errno;
10593                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
10594                         break;
10595                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
10596                         if (flow_dv_convert_action_modify_ttl
10597                                         (mhdr_res, actions, items, &flow_attr,
10598                                          dev_flow, !!(action_flags &
10599                                          MLX5_FLOW_ACTION_DECAP), error))
10600                                 return -rte_errno;
10601                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
10602                         break;
10603                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
10604                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
10605                         if (flow_dv_convert_action_modify_tcp_seq
10606                                         (mhdr_res, actions, error))
10607                                 return -rte_errno;
10608                         action_flags |= actions->type ==
10609                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
10610                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
10611                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
10612                         break;
10614                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
10615                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
10616                         if (flow_dv_convert_action_modify_tcp_ack
10617                                         (mhdr_res, actions, error))
10618                                 return -rte_errno;
10619                         action_flags |= actions->type ==
10620                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
10621                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
10622                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
10623                         break;
10624                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
10625                         if (flow_dv_convert_action_set_reg
10626                                         (mhdr_res, actions, error))
10627                                 return -rte_errno;
10628                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10629                         break;
10630                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
10631                         if (flow_dv_convert_action_copy_mreg
10632                                         (dev, mhdr_res, actions, error))
10633                                 return -rte_errno;
10634                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10635                         break;
10636                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
10637                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
10638                         dev_flow->handle->fate_action =
10639                                         MLX5_FLOW_FATE_DEFAULT_MISS;
10640                         break;
10641                 case RTE_FLOW_ACTION_TYPE_METER:
10642                         mtr = actions->conf;
10643                         if (!flow->meter) {
10644                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
10645                                                             attr, error);
10646                                 if (!fm)
10647                                         return rte_flow_error_set(error,
10648                                                 rte_errno,
10649                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10650                                                 NULL,
10651                                                 "meter not found "
10652                                                 "or invalid parameters");
10653                                 flow->meter = fm->idx;
10654                         }
10655                         /* Set the meter action. */
10656                         if (!fm) {
10657                                 fm = mlx5_ipool_get(priv->sh->ipool
10658                                                 [MLX5_IPOOL_MTR], flow->meter);
10659                                 if (!fm)
10660                                         return rte_flow_error_set(error,
10661                                                 rte_errno,
10662                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10663                                                 NULL,
10664                                                 "meter not found "
10665                                                 "or invalid parameters");
10666                         }
10667                         dev_flow->dv.actions[actions_n++] =
10668                                 fm->mfts->meter_action;
10669                         action_flags |= MLX5_FLOW_ACTION_METER;
10670                         break;
10671                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
10672                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
10673                                                               actions, error))
10674                                 return -rte_errno;
10675                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
10676                         break;
10677                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
10678                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
10679                                                               actions, error))
10680                                 return -rte_errno;
10681                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
10682                         break;
10683                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
10684                         sample_act_pos = actions_n;
10685                         sample = (const struct rte_flow_action_sample *)
10686                                  action->conf;
10687                         actions_n++;
10688                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
10689                         /* Put the encap action into the group when combined with a port ID action. */
10690                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
10691                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
10692                                 sample_act->action_flags |=
10693                                                         MLX5_FLOW_ACTION_ENCAP;
10694                         break;
10695                 case RTE_FLOW_ACTION_TYPE_END:
10696                         actions_end = true;
10697                         if (mhdr_res->actions_num) {
10698                                 /* create modify action if needed. */
10699                                 if (flow_dv_modify_hdr_resource_register
10700                                         (dev, mhdr_res, dev_flow, error))
10701                                         return -rte_errno;
10702                                 dev_flow->dv.actions[modify_action_position] =
10703                                         handle->dvh.modify_hdr->action;
10704                         }
10705                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
10706                                 /*
10707                                  * Create one count action, to be used
10708                                  * by all sub-flows.
10709                                  */
10710                                 if (!flow->counter) {
10711                                         flow->counter =
10712                                                 flow_dv_translate_create_counter
10713                                                         (dev, dev_flow, count,
10714                                                          age);
10715                                         if (!flow->counter)
10716                                                 return rte_flow_error_set
10717                                                 (error, rte_errno,
10718                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10719                                                  NULL, "cannot create counter"
10720                                                  " object.");
10721                                 }
10722                                 dev_flow->dv.actions[actions_n] =
10723                                           (flow_dv_counter_get_by_idx(dev,
10724                                           flow->counter, NULL))->action;
10725                                 actions_n++;
10726                         }
10727                 default:
10728                         break;
10729                 }
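                /*
                 * Reserve one slot in dv.actions for the modify-header
                 * action; it is filled once all header modifications have
                 * been accumulated, at RTE_FLOW_ACTION_TYPE_END.
                 */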
10730                 if (mhdr_res->actions_num &&
10731                     modify_action_position == UINT32_MAX)
10732                         modify_action_position = actions_n++;
10733         }
10734         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
10735                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
10736                 int item_type = items->type;
10737
10738                 if (!mlx5_flow_os_item_supported(item_type))
10739                         return rte_flow_error_set(error, ENOTSUP,
10740                                                   RTE_FLOW_ERROR_TYPE_ITEM,
10741                                                   NULL, "item not supported");
10742                 switch (item_type) {
10743                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
10744                         flow_dv_translate_item_port_id
10745                                 (dev, match_mask, match_value, items, attr);
10746                         last_item = MLX5_FLOW_ITEM_PORT_ID;
10747                         break;
10748                 case RTE_FLOW_ITEM_TYPE_ETH:
10749                         flow_dv_translate_item_eth(match_mask, match_value,
10750                                                    items, tunnel,
10751                                                    dev_flow->dv.group);
10752                         matcher.priority = action_flags &
10753                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
10754                                         !dev_flow->external ?
10755                                         MLX5_PRIORITY_MAP_L3 :
10756                                         MLX5_PRIORITY_MAP_L2;
10757                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
10758                                              MLX5_FLOW_LAYER_OUTER_L2;
10759                         break;
10760                 case RTE_FLOW_ITEM_TYPE_VLAN:
10761                         flow_dv_translate_item_vlan(dev_flow,
10762                                                     match_mask, match_value,
10763                                                     items, tunnel,
10764                                                     dev_flow->dv.group);
10765                         matcher.priority = MLX5_PRIORITY_MAP_L2;
10766                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
10767                                               MLX5_FLOW_LAYER_INNER_VLAN) :
10768                                              (MLX5_FLOW_LAYER_OUTER_L2 |
10769                                               MLX5_FLOW_LAYER_OUTER_VLAN);
10770                         break;
10771                 case RTE_FLOW_ITEM_TYPE_IPV4:
10772                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10773                                                   &item_flags, &tunnel);
10774                         flow_dv_translate_item_ipv4(match_mask, match_value,
10775                                                     items, tunnel,
10776                                                     dev_flow->dv.group);
10777                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10778                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
10779                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
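                        /*
                         * The effective next protocol is spec & mask, so only
                         * the bits actually matched drive tunnel detection;
                         * 0xff means no specific protocol is matched.
                         */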
10780                         if (items->mask != NULL &&
10781                             ((const struct rte_flow_item_ipv4 *)
10782                              items->mask)->hdr.next_proto_id) {
10783                                 next_protocol =
10784                                         ((const struct rte_flow_item_ipv4 *)
10785                                          (items->spec))->hdr.next_proto_id;
10786                                 next_protocol &=
10787                                         ((const struct rte_flow_item_ipv4 *)
10788                                          (items->mask))->hdr.next_proto_id;
10789                         } else {
10790                                 /* Reset for inner layer. */
10791                                 next_protocol = 0xff;
10792                         }
10793                         break;
10794                 case RTE_FLOW_ITEM_TYPE_IPV6:
10795                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10796                                                   &item_flags, &tunnel);
10797                         flow_dv_translate_item_ipv6(match_mask, match_value,
10798                                                     items, tunnel,
10799                                                     dev_flow->dv.group);
10800                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10801                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10802                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10803                         if (items->mask != NULL &&
10804                             ((const struct rte_flow_item_ipv6 *)
10805                              items->mask)->hdr.proto) {
10806                                 next_protocol =
10807                                         ((const struct rte_flow_item_ipv6 *)
10808                                          items->spec)->hdr.proto;
10809                                 next_protocol &=
10810                                         ((const struct rte_flow_item_ipv6 *)
10811                                          items->mask)->hdr.proto;
10812                         } else {
10813                                 /* Reset for inner layer. */
10814                                 next_protocol = 0xff;
10815                         }
10816                         break;
10817                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10818                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
10819                                                              match_value,
10820                                                              items, tunnel);
10821                         last_item = tunnel ?
10822                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10823                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10824                         if (items->mask != NULL &&
10825                             ((const struct rte_flow_item_ipv6_frag_ext *)
10826                              items->mask)->hdr.next_header) {
10827                                 next_protocol =
10828                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10829                                  items->spec)->hdr.next_header;
10830                                 next_protocol &=
10831                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10832                                  items->mask)->hdr.next_header;
10833                         } else {
10834                                 /* Reset for inner layer. */
10835                                 next_protocol = 0xff;
10836                         }
10837                         break;
10838                 case RTE_FLOW_ITEM_TYPE_TCP:
10839                         flow_dv_translate_item_tcp(match_mask, match_value,
10840                                                    items, tunnel);
10841                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10842                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10843                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10844                         break;
10845                 case RTE_FLOW_ITEM_TYPE_UDP:
10846                         flow_dv_translate_item_udp(match_mask, match_value,
10847                                                    items, tunnel);
10848                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10849                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10850                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10851                         break;
10852                 case RTE_FLOW_ITEM_TYPE_GRE:
10853                         flow_dv_translate_item_gre(match_mask, match_value,
10854                                                    items, tunnel);
10855                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10856                         last_item = MLX5_FLOW_LAYER_GRE;
10857                         break;
10858                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10859                         flow_dv_translate_item_gre_key(match_mask,
10860                                                        match_value, items);
10861                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10862                         break;
10863                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10864                         flow_dv_translate_item_nvgre(match_mask, match_value,
10865                                                      items, tunnel);
10866                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10867                         last_item = MLX5_FLOW_LAYER_GRE;
10868                         break;
10869                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10870                         flow_dv_translate_item_vxlan(match_mask, match_value,
10871                                                      items, tunnel);
10872                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10873                         last_item = MLX5_FLOW_LAYER_VXLAN;
10874                         break;
10875                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10876                         flow_dv_translate_item_vxlan_gpe(match_mask,
10877                                                          match_value, items,
10878                                                          tunnel);
10879                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10880                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10881                         break;
10882                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10883                         flow_dv_translate_item_geneve(match_mask, match_value,
10884                                                       items, tunnel);
10885                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10886                         last_item = MLX5_FLOW_LAYER_GENEVE;
10887                         break;
10888                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
10889                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
10890                                                           match_value,
10891                                                           items, error);
10892                         if (ret)
10893                                 return rte_flow_error_set(error, -ret,
10894                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
10895                                         "cannot create GENEVE TLV option");
10896                         flow->geneve_tlv_option = 1;
10897                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
10898                         break;
10899                 case RTE_FLOW_ITEM_TYPE_MPLS:
10900                         flow_dv_translate_item_mpls(match_mask, match_value,
10901                                                     items, last_item, tunnel);
10902                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10903                         last_item = MLX5_FLOW_LAYER_MPLS;
10904                         break;
10905                 case RTE_FLOW_ITEM_TYPE_MARK:
10906                         flow_dv_translate_item_mark(dev, match_mask,
10907                                                     match_value, items);
10908                         last_item = MLX5_FLOW_ITEM_MARK;
10909                         break;
10910                 case RTE_FLOW_ITEM_TYPE_META:
10911                         flow_dv_translate_item_meta(dev, match_mask,
10912                                                     match_value, attr, items);
10913                         last_item = MLX5_FLOW_ITEM_METADATA;
10914                         break;
10915                 case RTE_FLOW_ITEM_TYPE_ICMP:
10916                         flow_dv_translate_item_icmp(match_mask, match_value,
10917                                                     items, tunnel);
10918                         last_item = MLX5_FLOW_LAYER_ICMP;
10919                         break;
10920                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10921                         flow_dv_translate_item_icmp6(match_mask, match_value,
10922                                                       items, tunnel);
10923                         last_item = MLX5_FLOW_LAYER_ICMP6;
10924                         break;
10925                 case RTE_FLOW_ITEM_TYPE_TAG:
10926                         flow_dv_translate_item_tag(dev, match_mask,
10927                                                    match_value, items);
10928                         last_item = MLX5_FLOW_ITEM_TAG;
10929                         break;
10930                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10931                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10932                                                         match_value, items);
10933                         last_item = MLX5_FLOW_ITEM_TAG;
10934                         break;
10935                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10936                         flow_dv_translate_item_tx_queue(dev, match_mask,
10937                                                         match_value,
10938                                                         items);
10939                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10940                         break;
10941                 case RTE_FLOW_ITEM_TYPE_GTP:
10942                         flow_dv_translate_item_gtp(match_mask, match_value,
10943                                                    items, tunnel);
10944                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10945                         last_item = MLX5_FLOW_LAYER_GTP;
10946                         break;
10947                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
10948                         ret = flow_dv_translate_item_gtp_psc(match_mask,
10949                                                           match_value,
10950                                                           items);
10951                         if (ret)
10952                                 return rte_flow_error_set(error, -ret,
10953                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
10954                                         "cannot create GTP PSC item");
10955                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
10956                         break;
10957                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10958                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10959                                 /* Create the parser only on first use. */
10960                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10961                                 if (ret)
10962                                         return rte_flow_error_set
10963                                                 (error, -ret,
10964                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10965                                                 NULL,
10966                                                 "cannot create eCPRI parser");
10967                         }
10968                         /* Adjust the matcher mask size and the device flow value size. */
10969                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10970                         dev_flow->dv.value.size =
10971                                         MLX5_ST_SZ_BYTES(fte_match_param);
10972                         flow_dv_translate_item_ecpri(dev, match_mask,
10973                                                      match_value, items);
10974                         /* No other protocol should follow eCPRI layer. */
10975                         last_item = MLX5_FLOW_LAYER_ECPRI;
10976                         break;
10977                 default:
10978                         break;
10979                 }
10980                 item_flags |= last_item;
10981         }
10982         /*
10983          * When E-Switch mode is enabled, there are two cases where the
10984          * source port must be set manually:
10985          * a NIC steering rule, and an E-Switch rule in which no port_id
10986          * item was found. In both cases the source port is set according
10987          * to the port currently in use.
10988          */
10989         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10990             (priv->representor || priv->master)) {
10991                 if (flow_dv_translate_item_port_id(dev, match_mask,
10992                                                    match_value, NULL, attr))
10993                         return -rte_errno;
10994         }
10995 #ifdef RTE_LIBRTE_MLX5_DEBUG
10996         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10997                                               dev_flow->dv.value.buf));
10998 #endif
10999         /*
11000          * Layers may be already initialized from prefix flow if this dev_flow
11001          * is the suffix flow.
11002          */
11003         handle->layers |= item_flags;
11004         if (action_flags & MLX5_FLOW_ACTION_RSS)
11005                 flow_dv_hashfields_set(dev_flow, rss_desc);
11006         /* If the sample action contains an RSS action, the sample/mirror
11007          * resource must be registered after the hash fields are updated.
11008          */
11009         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
11010                 ret = flow_dv_translate_action_sample(dev,
11011                                                       sample,
11012                                                       dev_flow, attr,
11013                                                       &num_of_dest,
11014                                                       sample_actions,
11015                                                       &sample_res,
11016                                                       error);
11017                 if (ret < 0)
11018                         return ret;
11019                 ret = flow_dv_create_action_sample(dev,
11020                                                    dev_flow,
11021                                                    num_of_dest,
11022                                                    &sample_res,
11023                                                    &mdest_res,
11024                                                    sample_actions,
11025                                                    action_flags,
11026                                                    error);
11027                 if (ret < 0)
11028                         return rte_flow_error_set
11029                                                 (error, rte_errno,
11030                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11031                                                 NULL,
11032                                                 "cannot create sample action");
11033                 if (num_of_dest > 1) {
11034                         dev_flow->dv.actions[sample_act_pos] =
11035                         dev_flow->dv.dest_array_res->action;
11036                 } else {
11037                         dev_flow->dv.actions[sample_act_pos] =
11038                         dev_flow->dv.sample_res->verbs_action;
11039                 }
11040         }
11041         /*
11042          * For multiple destinations (sample action with ratio=1), the encap
11043          * action and the port ID action are combined into one group action,
11044          * so the original actions must be removed from the flow and the
11045          * sample action used in their place.
11046          */
11047         if (num_of_dest > 1 &&
11048             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
11049                 int i;
11050                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
11051
11052                 for (i = 0; i < actions_n; i++) {
11053                         if ((sample_act->dr_encap_action &&
11054                                 sample_act->dr_encap_action ==
11055                                 dev_flow->dv.actions[i]) ||
11056                                 (sample_act->dr_port_id_action &&
11057                                 sample_act->dr_port_id_action ==
11058                                 dev_flow->dv.actions[i]) ||
11059                                 (sample_act->dr_jump_action &&
11060                                 sample_act->dr_jump_action ==
11061                                 dev_flow->dv.actions[i]))
11062                                 continue;
11063                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
11064                 }
11065                 memcpy((void *)dev_flow->dv.actions,
11066                                 (void *)temp_actions,
11067                                 tmp_actions_n * sizeof(void *));
11068                 actions_n = tmp_actions_n;
11069         }
11070         dev_flow->dv.actions_n = actions_n;
11071         dev_flow->act_flags = action_flags;
11072         /* Register matcher. */
11073         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
11074                                     matcher.mask.size);
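        /* The mask checksum helps identify the matcher in the matcher cache
         * so that flows with identical masks share one matcher object.
         */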
11075         matcher.priority = mlx5_get_matcher_priority(dev, attr,
11076                                         matcher.priority);
11077         /* The reserved field does not need to be set to 0 here. */
11078         tbl_key.domain = attr->transfer;
11079         tbl_key.direction = attr->egress;
11080         tbl_key.table_id = dev_flow->dv.group;
11081         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
11082                                      tunnel, attr->group, error))
11083                 return -rte_errno;
11084         return 0;
11085 }
11086
11087 /**
11088  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11089  * and tunnel.
11090  *
11091  * @param[in, out] action
11092  *   Shared RSS action holding hash RX queue objects.
11093  * @param[in] hash_fields
11094  *   Defines combination of packet fields to participate in RX hash.
11095  * @param[in] tunnel
11096  *   Tunnel type.
11097  * @param[in] hrxq_idx
11098  *   Hash RX queue index to set.
11099  *
11100  * @return
11101  *   0 on success, otherwise negative errno value.
11102  */
11103 static int
11104 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
11105                               const uint64_t hash_fields,
11106                               const int tunnel,
11107                               uint32_t hrxq_idx)
11108 {
11109         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
11110
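        /*
         * Slots 0-2 hold the IPv4 combinations, slots 3-5 the IPv6 ones,
         * and slot 6 the no-hash case, matching the cases below.
         */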
11111         switch (hash_fields & ~IBV_RX_HASH_INNER) {
11112         case MLX5_RSS_HASH_IPV4:
11113                 hrxqs[0] = hrxq_idx;
11114                 return 0;
11115         case MLX5_RSS_HASH_IPV4_TCP:
11116                 hrxqs[1] = hrxq_idx;
11117                 return 0;
11118         case MLX5_RSS_HASH_IPV4_UDP:
11119                 hrxqs[2] = hrxq_idx;
11120                 return 0;
11121         case MLX5_RSS_HASH_IPV6:
11122                 hrxqs[3] = hrxq_idx;
11123                 return 0;
11124         case MLX5_RSS_HASH_IPV6_TCP:
11125                 hrxqs[4] = hrxq_idx;
11126                 return 0;
11127         case MLX5_RSS_HASH_IPV6_UDP:
11128                 hrxqs[5] = hrxq_idx;
11129                 return 0;
11130         case MLX5_RSS_HASH_NONE:
11131                 hrxqs[6] = hrxq_idx;
11132                 return 0;
11133         default:
11134                 return -1;
11135         }
11136 }
11137
11138 /**
11139  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11140  * and tunnel.
11141  *
11142  * @param[in] dev
11143  *   Pointer to the Ethernet device structure.
11144  * @param[in] idx
11145  *   Shared RSS action ID holding hash RX queue objects.
11146  * @param[in] hash_fields
11147  *   Defines combination of packet fields to participate in RX hash.
11148  * @param[in] tunnel
11149  *   Tunnel type.
11150  *
11151  * @return
11152  *   Valid hash RX queue index, otherwise 0.
11153  */
11154 static uint32_t
11155 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
11156                                  const uint64_t hash_fields,
11157                                  const int tunnel)
11158 {
11159         struct mlx5_priv *priv = dev->data->dev_private;
11160         struct mlx5_shared_action_rss *shared_rss =
11161             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11162         const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
11163                                                         shared_rss->hrxq_tunnel;
11164
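        /* The slot layout must mirror __flow_dv_action_rss_hrxq_set(). */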
11165         switch (hash_fields & ~IBV_RX_HASH_INNER) {
11166         case MLX5_RSS_HASH_IPV4:
11167                 return hrxqs[0];
11168         case MLX5_RSS_HASH_IPV4_TCP:
11169                 return hrxqs[1];
11170         case MLX5_RSS_HASH_IPV4_UDP:
11171                 return hrxqs[2];
11172         case MLX5_RSS_HASH_IPV6:
11173                 return hrxqs[3];
11174         case MLX5_RSS_HASH_IPV6_TCP:
11175                 return hrxqs[4];
11176         case MLX5_RSS_HASH_IPV6_UDP:
11177                 return hrxqs[5];
11178         case MLX5_RSS_HASH_NONE:
11179                 return hrxqs[6];
11180         default:
11181                 return 0;
11182         }
11183 }
11184
11185 /**
11186  * Apply the flow to the NIC, lock free
11187  * (mutex should be acquired by caller).
11188  *
11189  * @param[in] dev
11190  *   Pointer to the Ethernet device structure.
11191  * @param[in, out] flow
11192  *   Pointer to flow structure.
11193  * @param[out] error
11194  *   Pointer to error structure.
11195  *
11196  * @return
11197  *   0 on success, a negative errno value otherwise and rte_errno is set.
11198  */
11199 static int
11200 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
11201               struct rte_flow_error *error)
11202 {
11203         struct mlx5_flow_dv_workspace *dv;
11204         struct mlx5_flow_handle *dh;
11205         struct mlx5_flow_handle_dv *dv_h;
11206         struct mlx5_flow *dev_flow;
11207         struct mlx5_priv *priv = dev->data->dev_private;
11208         uint32_t handle_idx;
11209         int n;
11210         int err;
11211         int idx;
11212         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11213         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
11214
11215         MLX5_ASSERT(wks);
11216         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
11217                 dev_flow = &wks->flows[idx];
11218                 dv = &dev_flow->dv;
11219                 dh = dev_flow->handle;
11220                 dv_h = &dh->dvh;
11221                 n = dv->actions_n;
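                /*
                 * Append the fate action (drop, queue, shared RSS or
                 * default miss) as the last DV action of the sub-flow.
                 */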
11222                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
11223                         if (dv->transfer) {
11224                                 dv->actions[n++] = priv->sh->esw_drop_action;
11225                         } else {
11226                                 MLX5_ASSERT(priv->drop_queue.hrxq);
11227                                 dv->actions[n++] =
11228                                                 priv->drop_queue.hrxq->action;
11229                         }
11230                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
11231                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
11232                         struct mlx5_hrxq *hrxq;
11233                         uint32_t hrxq_idx;
11234
11235                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
11236                                                     &hrxq_idx);
11237                         if (!hrxq) {
11238                                 rte_flow_error_set
11239                                         (error, rte_errno,
11240                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11241                                          "cannot get hash queue");
11242                                 goto error;
11243                         }
11244                         dh->rix_hrxq = hrxq_idx;
11245                         dv->actions[n++] = hrxq->action;
11246                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
11247                         struct mlx5_hrxq *hrxq = NULL;
11248                         uint32_t hrxq_idx;
11249
11250                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
11251                                                 rss_desc->shared_rss,
11252                                                 dev_flow->hash_fields,
11253                                                 !!(dh->layers &
11254                                                 MLX5_FLOW_LAYER_TUNNEL));
11255                         if (hrxq_idx)
11256                                 hrxq = mlx5_ipool_get
11257                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
11258                                          hrxq_idx);
11259                         if (!hrxq) {
11260                                 rte_flow_error_set
11261                                         (error, rte_errno,
11262                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11263                                          "cannot get hash queue");
11264                                 goto error;
11265                         }
11266                         dh->rix_srss = rss_desc->shared_rss;
11267                         dv->actions[n++] = hrxq->action;
11268                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
11269                         if (!priv->sh->default_miss_action) {
11270                                 rte_flow_error_set
11271                                         (error, rte_errno,
11272                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11273                                          "default miss action not created.");
11274                                 goto error;
11275                         }
11276                         dv->actions[n++] = priv->sh->default_miss_action;
11277                 }
11278                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
11279                                                (void *)&dv->value, n,
11280                                                dv->actions, &dh->drv_flow);
11281                 if (err) {
11282                         rte_flow_error_set(error, errno,
11283                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11284                                            NULL,
11285                                            "hardware refuses to create flow");
11286                         goto error;
11287                 }
11288                 if (priv->vmwa_context &&
11289                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
11290                         /*
11291                          * The rule contains the VLAN pattern.
11292                          * For VF we are going to create VLAN
11293                          * interface to make hypervisor set correct
11294                          * e-Switch vport context.
11295                          */
11296                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
11297                 }
11298         }
11299         return 0;
11300 error:
11301         err = rte_errno; /* Save rte_errno before cleanup. */
11302         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
11303                        handle_idx, dh, next) {
11304                 /* hrxq is a union, don't clear it if the flag is not set. */
11305                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
11306                         mlx5_hrxq_release(dev, dh->rix_hrxq);
11307                         dh->rix_hrxq = 0;
11308                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
11309                         dh->rix_srss = 0;
11310                 }
11311                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
11312                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
11313         }
11314         rte_errno = err; /* Restore rte_errno. */
11315         return -rte_errno;
11316 }
11317
11318 void
11319 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
11320                           struct mlx5_cache_entry *entry)
11321 {
11322         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
11323                                                           entry);
11324
11325         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
11326         mlx5_free(cache);
11327 }
11328
11329 /**
11330  * Release the flow matcher.
11331  *
11332  * @param dev
11333  *   Pointer to Ethernet device.
11334  * @param handle
11335  *   Pointer to the mlx5 flow handle holding the matcher.
11336  *
11337  * @return
11338  *   1 while a reference on it exists, 0 when freed.
11339  */
11340 static int
11341 flow_dv_matcher_release(struct rte_eth_dev *dev,
11342                         struct mlx5_flow_handle *handle)
11343 {
11344         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
11345         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
11346                                                             typeof(*tbl), tbl);
11347         int ret;
11348
11349         MLX5_ASSERT(matcher->matcher_object);
11350         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
11351         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
11352         return ret;
11353 }
11354
11355 /**
11356  * Release encap_decap resource.
11357  *
11358  * @param list
11359  *   Pointer to the hash list.
11360  * @param entry
11361  *   Pointer to the existing resource entry object.
11362  */
11363 void
11364 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
11365                               struct mlx5_hlist_entry *entry)
11366 {
11367         struct mlx5_dev_ctx_shared *sh = list->ctx;
11368         struct mlx5_flow_dv_encap_decap_resource *res =
11369                 container_of(entry, typeof(*res), entry);
11370
11371         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
11372         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
11373 }
11374
11375 /**
11376  * Release an encap/decap resource.
11377  *
11378  * @param dev
11379  *   Pointer to Ethernet device.
11380  * @param encap_decap_idx
11381  *   Index of encap decap resource.
11382  *
11383  * @return
11384  *   1 while a reference on it exists, 0 when freed.
11385  */
11386 static int
11387 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
11388                                      uint32_t encap_decap_idx)
11389 {
11390         struct mlx5_priv *priv = dev->data->dev_private;
11391         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
11392
11393         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
11394                                         encap_decap_idx);
11395         if (!cache_resource)
11396                 return 0;
11397         MLX5_ASSERT(cache_resource->action);
11398         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
11399                                      &cache_resource->entry);
11400 }
11401
11402 /**
11403  * Release a jump-to-table action resource.
11404  *
11405  * @param dev
11406  *   Pointer to Ethernet device.
11407  * @param rix_jump
11408  *   Index to the jump action resource.
11409  *
11410  * @return
11411  *   1 while a reference on it exists, 0 when freed.
11412  */
11413 static int
11414 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
11415                                   uint32_t rix_jump)
11416 {
11417         struct mlx5_priv *priv = dev->data->dev_private;
11418         struct mlx5_flow_tbl_data_entry *tbl_data;
11419
11420         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
11421                                   rix_jump);
11422         if (!tbl_data)
11423                 return 0;
11424         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
11425 }
11426
11427 void
11428 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
11429                          struct mlx5_hlist_entry *entry)
11430 {
11431         struct mlx5_flow_dv_modify_hdr_resource *res =
11432                 container_of(entry, typeof(*res), entry);
11433
11434         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
11435         mlx5_free(entry);
11436 }
11437
11438 /**
11439  * Release a modify-header resource.
11440  *
11441  * @param dev
11442  *   Pointer to Ethernet device.
11443  * @param handle
11444  *   Pointer to mlx5_flow_handle.
11445  *
11446  * @return
11447  *   1 while a reference on it exists, 0 when freed.
11448  */
11449 static int
11450 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
11451                                     struct mlx5_flow_handle *handle)
11452 {
11453         struct mlx5_priv *priv = dev->data->dev_private;
11454         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
11455
11456         MLX5_ASSERT(entry->action);
11457         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
11458 }
11459
11460 void
11461 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
11462                           struct mlx5_cache_entry *entry)
11463 {
11464         struct mlx5_dev_ctx_shared *sh = list->ctx;
11465         struct mlx5_flow_dv_port_id_action_resource *cache =
11466                         container_of(entry, typeof(*cache), entry);
11467
11468         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
11469         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
11470 }
11471
11472 /**
11473  * Release port ID action resource.
11474  *
11475  * @param dev
11476  *   Pointer to Ethernet device.
11477  * @param handle
11478  *   Pointer to mlx5_flow_handle.
11479  *
11480  * @return
11481  *   1 while a reference on it exists, 0 when freed.
11482  */
11483 static int
11484 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
11485                                         uint32_t port_id)
11486 {
11487         struct mlx5_priv *priv = dev->data->dev_private;
11488         struct mlx5_flow_dv_port_id_action_resource *cache;
11489
11490         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
11491         if (!cache)
11492                 return 0;
11493         MLX5_ASSERT(cache->action);
11494         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
11495                                      &cache->entry);
11496 }
11497
11498 /**
11499  * Release shared RSS action resource.
11500  *
11501  * @param dev
11502  *   Pointer to Ethernet device.
11503  * @param srss
11504  *   Shared RSS action index.
11505  */
11506 static void
11507 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
11508 {
11509         struct mlx5_priv *priv = dev->data->dev_private;
11510         struct mlx5_shared_action_rss *shared_rss;
11511
11512         shared_rss = mlx5_ipool_get
11513                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
11514         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
11515 }
11516
11517 void
11518 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
11519                             struct mlx5_cache_entry *entry)
11520 {
11521         struct mlx5_dev_ctx_shared *sh = list->ctx;
11522         struct mlx5_flow_dv_push_vlan_action_resource *cache =
11523                         container_of(entry, typeof(*cache), entry);
11524
11525         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
11526         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
11527 }
11528
11529 /**
11530  * Release push vlan action resource.
11531  *
11532  * @param dev
11533  *   Pointer to Ethernet device.
11534  * @param handle
11535  *   Pointer to mlx5_flow_handle.
11536  *
11537  * @return
11538  *   1 while a reference on it exists, 0 when freed.
11539  */
11540 static int
11541 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
11542                                           struct mlx5_flow_handle *handle)
11543 {
11544         struct mlx5_priv *priv = dev->data->dev_private;
11545         struct mlx5_flow_dv_push_vlan_action_resource *cache;
11546         uint32_t idx = handle->dvh.rix_push_vlan;
11547
11548         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
11549         if (!cache)
11550                 return 0;
11551         MLX5_ASSERT(cache->action);
11552         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
11553                                      &cache->entry);
11554 }
11555
11556 /**
11557  * Release the fate resource.
11558  *
11559  * @param dev
11560  *   Pointer to Ethernet device.
11561  * @param handle
11562  *   Pointer to mlx5_flow_handle.
11563  */
11564 static void
11565 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
11566                                struct mlx5_flow_handle *handle)
11567 {
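        /* rix_fate is a union; fate_action selects which member is valid. */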
11568         if (!handle->rix_fate)
11569                 return;
11570         switch (handle->fate_action) {
11571         case MLX5_FLOW_FATE_QUEUE:
11572                 mlx5_hrxq_release(dev, handle->rix_hrxq);
11573                 break;
11574         case MLX5_FLOW_FATE_JUMP:
11575                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
11576                 break;
11577         case MLX5_FLOW_FATE_PORT_ID:
11578                 flow_dv_port_id_action_resource_release(dev,
11579                                 handle->rix_port_id_action);
11580                 break;
11581         default:
11582                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
11583                 break;
11584         }
11585         handle->rix_fate = 0;
11586 }
11587
11588 void
11589 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
11590                          struct mlx5_cache_entry *entry)
11591 {
11592         struct mlx5_flow_dv_sample_resource *cache_resource =
11593                         container_of(entry, typeof(*cache_resource), entry);
11594         struct rte_eth_dev *dev = cache_resource->dev;
11595         struct mlx5_priv *priv = dev->data->dev_private;
11596
11597         if (cache_resource->verbs_action)
11598                 claim_zero(mlx5_flow_os_destroy_flow_action
11599                                 (cache_resource->verbs_action));
11600         if (cache_resource->normal_path_tbl)
11601                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11602                         cache_resource->normal_path_tbl);
11603         flow_dv_sample_sub_actions_release(dev,
11604                                 &cache_resource->sample_idx);
11605         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11606                         cache_resource->idx);
11607         DRV_LOG(DEBUG, "sample resource %p: removed",
11608                 (void *)cache_resource);
11609 }
11610
11611 /**
11612  * Release a sample resource.
11613  *
11614  * @param dev
11615  *   Pointer to Ethernet device.
11616  * @param handle
11617  *   Pointer to mlx5_flow_handle.
11618  *
11619  * @return
11620  *   1 while a reference on it exists, 0 when freed.
11621  */
11622 static int
11623 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
11624                                      struct mlx5_flow_handle *handle)
11625 {
11626         struct mlx5_priv *priv = dev->data->dev_private;
11627         struct mlx5_flow_dv_sample_resource *cache_resource;
11628
11629         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11630                          handle->dvh.rix_sample);
11631         if (!cache_resource)
11632                 return 0;
11633         MLX5_ASSERT(cache_resource->verbs_action);
11634         return mlx5_cache_unregister(&priv->sh->sample_action_list,
11635                                      &cache_resource->entry);
11636 }
11637
11638 void
11639 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
11640                              struct mlx5_cache_entry *entry)
11641 {
11642         struct mlx5_flow_dv_dest_array_resource *cache_resource =
11643                         container_of(entry, typeof(*cache_resource), entry);
11644         struct rte_eth_dev *dev = cache_resource->dev;
11645         struct mlx5_priv *priv = dev->data->dev_private;
11646         uint32_t i = 0;
11647
11648         MLX5_ASSERT(cache_resource->action);
11649         if (cache_resource->action)
11650                 claim_zero(mlx5_flow_os_destroy_flow_action
11651                                         (cache_resource->action));
11652         for (; i < cache_resource->num_of_dest; i++)
11653                 flow_dv_sample_sub_actions_release(dev,
11654                                 &cache_resource->sample_idx[i]);
11655         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11656                         cache_resource->idx);
11657         DRV_LOG(DEBUG, "destination array resource %p: removed",
11658                 (void *)cache_resource);
11659 }
11660
11661 /**
11662  * Release a destination array resource.
11663  *
11664  * @param dev
11665  *   Pointer to Ethernet device.
11666  * @param handle
11667  *   Pointer to mlx5_flow_handle.
11668  *
11669  * @return
11670  *   1 while a reference on it exists, 0 when freed.
11671  */
11672 static int
11673 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
11674                                     struct mlx5_flow_handle *handle)
11675 {
11676         struct mlx5_priv *priv = dev->data->dev_private;
11677         struct mlx5_flow_dv_dest_array_resource *cache;
11678
11679         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11680                                handle->dvh.rix_dest_array);
11681         if (!cache)
11682                 return 0;
11683         MLX5_ASSERT(cache->action);
11684         return mlx5_cache_unregister(&priv->sh->dest_array_list,
11685                                      &cache->entry);
11686 }
11687
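/**
 * Release the GENEVE TLV option resource.
 * The DevX object is destroyed when the last reference is dropped.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */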
11688 static void
11689 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
11690 {
11691         struct mlx5_priv *priv = dev->data->dev_private;
11692         struct mlx5_dev_ctx_shared *sh = priv->sh;
11693         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
11694                                 sh->geneve_tlv_option_resource;
11695         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
11696         if (geneve_opt_resource) {
11697                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
11698                                          __ATOMIC_RELAXED))) {
11699                         claim_zero(mlx5_devx_cmd_destroy
11700                                         (geneve_opt_resource->obj));
11701                         mlx5_free(sh->geneve_tlv_option_resource);
11702                         sh->geneve_tlv_option_resource = NULL;
11703                 }
11704         }
11705         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
11706 }
11707
11708 /**
11709  * Remove the flow from the NIC but keep it in memory.
11710  * Lock free, (mutex should be acquired by caller).
11711  *
11712  * @param[in] dev
11713  *   Pointer to Ethernet device.
11714  * @param[in, out] flow
11715  *   Pointer to flow structure.
11716  */
11717 static void
11718 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
11719 {
11720         struct mlx5_flow_handle *dh;
11721         uint32_t handle_idx;
11722         struct mlx5_priv *priv = dev->data->dev_private;
11723
11724         if (!flow)
11725                 return;
11726         handle_idx = flow->dev_handles;
11727         while (handle_idx) {
11728                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11729                                     handle_idx);
11730                 if (!dh)
11731                         return;
11732                 if (dh->drv_flow) {
11733                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
11734                         dh->drv_flow = NULL;
11735                 }
11736                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
11737                         flow_dv_fate_resource_release(dev, dh);
11738                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
11739                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
11740                 handle_idx = dh->next.next;
11741         }
11742 }
11743
11744 /**
11745  * Remove the flow from the NIC and the memory.
11746  * Lock free, (mutex should be acquired by caller).
11747  *
11748  * @param[in] dev
11749  *   Pointer to the Ethernet device structure.
11750  * @param[in, out] flow
11751  *   Pointer to flow structure.
11752  */
11753 static void
11754 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
11755 {
11756         struct mlx5_flow_handle *dev_handle;
11757         struct mlx5_priv *priv = dev->data->dev_private;
11758         uint32_t srss = 0;
11759
11760         if (!flow)
11761                 return;
11762         flow_dv_remove(dev, flow);
11763         if (flow->counter) {
11764                 flow_dv_counter_free(dev, flow->counter);
11765                 flow->counter = 0;
11766         }
11767         if (flow->meter) {
11768                 struct mlx5_flow_meter *fm;
11769
11770                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
11771                                     flow->meter);
11772                 if (fm)
11773                         mlx5_flow_meter_detach(fm);
11774                 flow->meter = 0;
11775         }
11776         if (flow->age)
11777                 flow_dv_aso_age_release(dev, flow->age);
11778         if (flow->geneve_tlv_option) {
11779                 flow_dv_geneve_tlv_option_resource_release(dev);
11780                 flow->geneve_tlv_option = 0;
11781         }
11782         while (flow->dev_handles) {
11783                 uint32_t tmp_idx = flow->dev_handles;
11784
11785                 dev_handle = mlx5_ipool_get(priv->sh->ipool
11786                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
11787                 if (!dev_handle)
11788                         return;
11789                 flow->dev_handles = dev_handle->next.next;
11790                 if (dev_handle->dvh.matcher)
11791                         flow_dv_matcher_release(dev, dev_handle);
11792                 if (dev_handle->dvh.rix_sample)
11793                         flow_dv_sample_resource_release(dev, dev_handle);
11794                 if (dev_handle->dvh.rix_dest_array)
11795                         flow_dv_dest_array_resource_release(dev, dev_handle);
11796                 if (dev_handle->dvh.rix_encap_decap)
11797                         flow_dv_encap_decap_resource_release(dev,
11798                                 dev_handle->dvh.rix_encap_decap);
11799                 if (dev_handle->dvh.modify_hdr)
11800                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
11801                 if (dev_handle->dvh.rix_push_vlan)
11802                         flow_dv_push_vlan_action_resource_release(dev,
11803                                                                   dev_handle);
11804                 if (dev_handle->dvh.rix_tag)
11805                         flow_dv_tag_release(dev,
11806                                             dev_handle->dvh.rix_tag);
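                /*
                 * Shared RSS release is deferred: record the index here and
                 * drop the reference once, after all handles are freed.
                 */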
11807                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
11808                         flow_dv_fate_resource_release(dev, dev_handle);
11809                 else if (!srss)
11810                         srss = dev_handle->rix_srss;
11811                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11812                            tmp_idx);
11813         }
11814         if (srss)
11815                 flow_dv_shared_rss_action_release(dev, srss);
11816 }
11817
11818 /**
11819  * Release array of hash RX queue objects.
11820  * Helper function.
11821  *
11822  * @param[in] dev
11823  *   Pointer to the Ethernet device structure.
11824  * @param[in, out] hrxqs
11825  *   Array of hash RX queue objects.
11826  *
11827  * @return
11828  *   Total number of references to hash RX queue objects in *hrxqs* array
11829  *   after this operation.
11830  */
11831 static int
11832 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
11833                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
11834 {
11835         size_t i;
11836         int remaining = 0;
11837
11838         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
11839                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
11840
11841                 if (!ret)
11842                         (*hrxqs)[i] = 0;
11843                 remaining += ret;
11844         }
11845         return remaining;
11846 }
11847
11848 /**
11849  * Release all hash RX queue objects representing shared RSS action.
11850  *
11851  * @param[in] dev
11852  *   Pointer to the Ethernet device structure.
11853  * @param[in, out] action
11854  *   Shared RSS action to remove hash RX queue objects from.
11855  *
11856  * @return
11857  *   Total number of references to hash RX queue objects stored in *action*
11858  *   after this operation.
11859  *   Expected to be 0 if no external references are held.
11860  */
11861 static int
11862 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
11863                                  struct mlx5_shared_action_rss *action)
11864 {
11865         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
11866                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
11867 }
11868
11869 /**
11870  * Set up a shared RSS action.
11871  * Prepare a set of hash RX queue objects sufficient to handle all valid
11872  * hash_fields combinations (see enum ibv_rx_hash_fields).
11873  *
11874  * @param[in] dev
11875  *   Pointer to the Ethernet device structure.
11876  * @param[in] action_idx
11877  *   Shared RSS action ipool index.
11878  * @param[in, out] action
11879  *   Partially initialized shared RSS action.
11880  * @param[out] error
11881  *   Perform verbose error reporting if not NULL. Initialized in case of
11882  *   error only.
11883  *
11884  * @return
11885  *   0 on success, otherwise negative errno value.
11886  */
11887 static int
11888 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
11889                            uint32_t action_idx,
11890                            struct mlx5_shared_action_rss *action,
11891                            struct rte_flow_error *error)
11892 {
11893         struct mlx5_flow_rss_desc rss_desc = { 0 };
11894         size_t i;
11895         int err;
11896
11897         if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) {
11898                 return rte_flow_error_set(error, rte_errno,
11899                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11900                                           "cannot setup indirection table");
11901         }
11902         memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
11903         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
11904         rss_desc.const_q = action->origin.queue;
11905         rss_desc.queue_num = action->origin.queue_num;
11906         /* Set non-zero value to indicate a shared RSS. */
11907         rss_desc.shared_rss = action_idx;
11908         rss_desc.ind_tbl = action->ind_tbl;
11909         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11910                 uint32_t hrxq_idx;
11911                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11912                 int tunnel;
11913
11914                 for (tunnel = 0; tunnel < 2; tunnel++) {
11915                         rss_desc.tunnel = tunnel;
11916                         rss_desc.hash_fields = hash_fields;
11917                         hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
11918                         if (!hrxq_idx) {
11919                                 rte_flow_error_set
11920                                         (error, rte_errno,
11921                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11922                                          "cannot get hash queue");
11923                                 goto error_hrxq_new;
11924                         }
11925                         err = __flow_dv_action_rss_hrxq_set
11926                                 (action, hash_fields, tunnel, hrxq_idx);
11927                         MLX5_ASSERT(!err);
11928                 }
11929         }
11930         return 0;
11931 error_hrxq_new:
11932         err = rte_errno;
11933         __flow_dv_action_rss_hrxqs_release(dev, action);
11934         if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true))
11935                 action->ind_tbl = NULL;
11936         rte_errno = err;
11937         return -rte_errno;
11938 }
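
/*
 * Illustrative sketch, guarded out of the build (MLX5_FLOW_DV_EXAMPLES is a
 * hypothetical macro that is never defined): how a lookup could pick the hash
 * RX queue prepared above for one (hash_fields, tunnel) combination, assuming
 * the storage slots follow the mlx5_rss_hash_fields[] order. The helper name
 * is invented for the example.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static uint32_t
example_rss_hrxq_pick(const struct mlx5_shared_action_rss *action,
                      uint64_t hash_fields, bool tunnel)
{
        size_t i;

        for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++)
                if (mlx5_rss_hash_fields[i] == hash_fields)
                        return tunnel ? action->hrxq_tunnel[i] :
                                        action->hrxq[i];
        return 0; /* No hash RX queue prepared for this combination. */
}
#endif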
11939
11940 /**
11941  * Create shared RSS action.
11942  *
11943  * @param[in] dev
11944  *   Pointer to the Ethernet device structure.
11945  * @param[in] conf
11946  *   Shared action configuration.
11947  * @param[in] rss
11948  *   RSS action specification used to create shared action.
11949  * @param[out] error
11950  *   Perform verbose error reporting if not NULL. Initialized in case of
11951  *   error only.
11952  *
11953  * @return
11954  *   A valid shared action ID in case of success, 0 otherwise and
11955  *   rte_errno is set.
11956  */
11957 static uint32_t
11958 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11959                             const struct rte_flow_shared_action_conf *conf,
11960                             const struct rte_flow_action_rss *rss,
11961                             struct rte_flow_error *error)
11962 {
11963         struct mlx5_priv *priv = dev->data->dev_private;
11964         struct mlx5_shared_action_rss *shared_action = NULL;
11965         void *queue = NULL;
11966         struct rte_flow_action_rss *origin;
11967         const uint8_t *rss_key;
11968         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11969         uint32_t idx;
11970
11971         RTE_SET_USED(conf);
11972         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11973                             0, SOCKET_ID_ANY);
11974         shared_action = mlx5_ipool_zmalloc
11975                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
11976         if (!shared_action || !queue) {
11977                 rte_flow_error_set(error, ENOMEM,
11978                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11979                                    "cannot allocate resource memory");
11980                 goto error_rss_init;
11981         }
11982         if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
11983                 rte_flow_error_set(error, E2BIG,
11984                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11985                                    "rss action number out of range");
11986                 goto error_rss_init;
11987         }
11988         shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
11989                                              sizeof(*shared_action->ind_tbl),
11990                                              0, SOCKET_ID_ANY);
11991         if (!shared_action->ind_tbl) {
11992                 rte_flow_error_set(error, ENOMEM,
11993                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11994                                    "cannot allocate resource memory");
11995                 goto error_rss_init;
11996         }
11997         memcpy(queue, rss->queue, queue_size);
11998         shared_action->ind_tbl->queues = queue;
11999         shared_action->ind_tbl->queues_n = rss->queue_num;
12000         origin = &shared_action->origin;
12001         origin->func = rss->func;
12002         origin->level = rss->level;
12003         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
12004         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
12005         /* NULL RSS key indicates default RSS key. */
12006         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12007         memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12008         origin->key = &shared_action->key[0];
12009         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
12010         origin->queue = queue;
12011         origin->queue_num = rss->queue_num;
12012         if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
12013                 goto error_rss_init;
12014         rte_spinlock_init(&shared_action->action_rss_sl);
12015         __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
12016         rte_spinlock_lock(&priv->shared_act_sl);
12017         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12018                      &priv->rss_shared_actions, idx, shared_action, next);
12019         rte_spinlock_unlock(&priv->shared_act_sl);
12020         return idx;
12021 error_rss_init:
12022         if (shared_action) {
12023                 if (shared_action->ind_tbl)
12024                         mlx5_free(shared_action->ind_tbl);
12025                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12026                                 idx);
12027         }
12028         if (queue)
12029                 mlx5_free(queue);
12030         return 0;
12031 }
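
/*
 * Usage sketch for the creation path above, guarded out of the build
 * (MLX5_FLOW_DV_EXAMPLES is hypothetical and never defined): an application
 * creates the shared RSS action through the generic rte_flow API, which
 * dispatches down to __flow_dv_action_rss_create(). Queue numbers are example
 * values; a NULL key selects the default RSS key as handled above, and an
 * ingress-only configuration is assumed.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static struct rte_flow_shared_action *
example_shared_rss_create(uint16_t port_id, struct rte_flow_error *error)
{
        static const uint16_t queues[] = { 0, 1, 2, 3 };
        const struct rte_flow_action_rss rss = {
                .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
                .types = ETH_RSS_IP, /* 0 would also select ETH_RSS_IP. */
                .key = NULL,         /* Default RSS key. */
                .queue = queues,
                .queue_num = RTE_DIM(queues),
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };
        const struct rte_flow_shared_action_conf conf = { .ingress = 1 };

        return rte_flow_shared_action_create(port_id, &conf, &action, error);
}
#endif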
12032
12033 /**
12034  * Destroy the shared RSS action.
12035  * Release related hash RX queue objects.
12036  *
12037  * @param[in] dev
12038  *   Pointer to the Ethernet device structure.
12039  * @param[in] idx
12040  *   The shared RSS action object ID to be removed.
12041  * @param[out] error
12042  *   Perform verbose error reporting if not NULL. Initialized in case of
12043  *   error only.
12044  *
12045  * @return
12046  *   0 on success, otherwise negative errno value.
12047  */
12048 static int
12049 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
12050                              struct rte_flow_error *error)
12051 {
12052         struct mlx5_priv *priv = dev->data->dev_private;
12053         struct mlx5_shared_action_rss *shared_rss =
12054             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12055         uint32_t old_refcnt = 1;
12056         int remaining;
12057         uint16_t *queue = NULL;
12058
12059         if (!shared_rss)
12060                 return rte_flow_error_set(error, EINVAL,
12061                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12062                                           "invalid shared action");
12063         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
12064         if (remaining)
12065                 return rte_flow_error_set(error, EBUSY,
12066                                           RTE_FLOW_ERROR_TYPE_ACTION,
12067                                           NULL,
12068                                           "shared rss hrxq has references");
12069         queue = shared_rss->ind_tbl->queues;
12070         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
12071         if (remaining)
12072                 return rte_flow_error_set(error, EBUSY,
12073                                           RTE_FLOW_ERROR_TYPE_ACTION,
12074                                           NULL,
12075                                           "shared rss indirection table has"
12076                                           " references");
12077         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
12078                                          0, 0, __ATOMIC_ACQUIRE,
12079                                          __ATOMIC_RELAXED))
12080                 return rte_flow_error_set(error, EBUSY,
12081                                           RTE_FLOW_ERROR_TYPE_ACTION,
12082                                           NULL,
12083                                           "shared rss has references");
12084         mlx5_free(queue);
12085         rte_spinlock_lock(&priv->shared_act_sl);
12086         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12087                      &priv->rss_shared_actions, idx, shared_rss, next);
12088         rte_spinlock_unlock(&priv->shared_act_sl);
12089         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12090                         idx);
12091         return 0;
12092 }
12093
12094 /**
12095  * Create a shared action, lock-free
12096  * (the mutex should be acquired by the caller).
12097  * Dispatcher for action type specific call.
12098  *
12099  * @param[in] dev
12100  *   Pointer to the Ethernet device structure.
12101  * @param[in] conf
12102  *   Shared action configuration.
12103  * @param[in] action
12104  *   Action specification used to create shared action.
12105  * @param[out] error
12106  *   Perform verbose error reporting if not NULL. Initialized in case of
12107  *   error only.
12108  *
12109  * @return
12110  *   A valid shared action handle in case of success, NULL otherwise and
12111  *   rte_errno is set.
12112  */
12113 static struct rte_flow_shared_action *
12114 flow_dv_action_create(struct rte_eth_dev *dev,
12115                       const struct rte_flow_shared_action_conf *conf,
12116                       const struct rte_flow_action *action,
12117                       struct rte_flow_error *err)
12118 {
12119         uint32_t idx = 0;
12120         uint32_t ret = 0;
12121
12122         switch (action->type) {
12123         case RTE_FLOW_ACTION_TYPE_RSS:
12124                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
12125                 idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
12126                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12127                 break;
12128         case RTE_FLOW_ACTION_TYPE_AGE:
12129                 ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
12130                 idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
12131                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12132                 if (ret) {
12133                         struct mlx5_aso_age_action *aso_age =
12134                                               flow_aso_age_get_by_idx(dev, ret);
12135
12136                         if (!aso_age->age_params.context)
12137                                 aso_age->age_params.context =
12138                                                          (void *)(uintptr_t)idx;
12139                 }
12140                 break;
12141         default:
12142                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12143                                    NULL, "action type not supported");
12144                 break;
12145         }
12146         return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
12147 }
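
/*
 * Note on the returned handle: it is not a real pointer. The function above
 * packs the action type above MLX5_SHARED_ACTION_TYPE_OFFSET and the ipool
 * index below it. A minimal decoding sketch mirroring flow_dv_action_destroy()
 * below (guarded out; MLX5_FLOW_DV_EXAMPLES is a hypothetical macro):
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_decode_shared_action(const struct rte_flow_shared_action *handle,
                             uint32_t *type, uint32_t *idx)
{
        uint32_t act_idx = (uint32_t)(uintptr_t)handle;

        *type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
        *idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
}
#endif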
12148
12149 /**
12150  * Destroy the shared action.
12151  * Release action-related resources on the NIC and in memory.
12152  * Lock-free (the mutex should be acquired by the caller).
12153  * Dispatcher for action type specific call.
12154  *
12155  * @param[in] dev
12156  *   Pointer to the Ethernet device structure.
12157  * @param[in] action
12158  *   The shared action object to be removed.
12159  * @param[out] error
12160  *   Perform verbose error reporting if not NULL. Initialized in case of
12161  *   error only.
12162  *
12163  * @return
12164  *   0 on success, otherwise negative errno value.
12165  */
12166 static int
12167 flow_dv_action_destroy(struct rte_eth_dev *dev,
12168                        struct rte_flow_shared_action *action,
12169                        struct rte_flow_error *error)
12170 {
12171         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12172         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12173         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12174         int ret;
12175
12176         switch (type) {
12177         case MLX5_SHARED_ACTION_TYPE_RSS:
12178                 return __flow_dv_action_rss_release(dev, idx, error);
12179         case MLX5_SHARED_ACTION_TYPE_AGE:
12180                 ret = flow_dv_aso_age_release(dev, idx);
12181                 if (ret)
12182                         /*
12183                          * In this case, the last flow holding a reference
12184                          * will actually release the age action.
12185                          */
12186                         DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
12187                                 " released with references %d.", idx, ret);
12188                 return 0;
12189         default:
12190                 return rte_flow_error_set(error, ENOTSUP,
12191                                           RTE_FLOW_ERROR_TYPE_ACTION,
12192                                           NULL,
12193                                           "action type not supported");
12194         }
12195 }
12196
12197 /**
12198  * Update a shared RSS action configuration in place.
12199  *
12200  * @param[in] dev
12201  *   Pointer to the Ethernet device structure.
12202  * @param[in] idx
12203  *   The shared RSS action object ID to be updated.
12204  * @param[in] action_conf
12205  *   RSS action specification used to modify *shared_rss*.
12206  * @param[out] error
12207  *   Perform verbose error reporting if not NULL. Initialized in case of
12208  *   error only.
12209  *
12210  * @return
12211  *   0 on success, otherwise negative errno value.
12212  * @note: currently only updating the RSS queues is supported.
12213  */
12214 static int
12215 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
12216                             const struct rte_flow_action_rss *action_conf,
12217                             struct rte_flow_error *error)
12218 {
12219         struct mlx5_priv *priv = dev->data->dev_private;
12220         struct mlx5_shared_action_rss *shared_rss =
12221             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12222         int ret = 0;
12223         void *queue = NULL;
12224         uint16_t *queue_old = NULL;
12225         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
12226
12227         if (!shared_rss)
12228                 return rte_flow_error_set(error, EINVAL,
12229                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12230                                           "invalid shared action to update");
12231         queue = mlx5_malloc(MLX5_MEM_ZERO,
12232                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
12233                             0, SOCKET_ID_ANY);
12234         if (!queue)
12235                 return rte_flow_error_set(error, ENOMEM,
12236                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12237                                           NULL,
12238                                           "cannot allocate resource memory");
12239         memcpy(queue, action_conf->queue, queue_size);
12240         MLX5_ASSERT(shared_rss->ind_tbl);
12241         rte_spinlock_lock(&shared_rss->action_rss_sl);
12242         queue_old = shared_rss->ind_tbl->queues;
12243         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
12244                                         queue, action_conf->queue_num, true);
12245         if (ret) {
12246                 mlx5_free(queue);
12247                 ret = rte_flow_error_set(error, rte_errno,
12248                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12249                                           "cannot update indirection table");
12250         } else {
12251                 mlx5_free(queue_old);
12252                 shared_rss->origin.queue = queue;
12253                 shared_rss->origin.queue_num = action_conf->queue_num;
12254         }
12255         rte_spinlock_unlock(&shared_rss->action_rss_sl);
12256         return ret;
12257 }
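
/*
 * Usage sketch for the update path above (guarded out; MLX5_FLOW_DV_EXAMPLES
 * is hypothetical): replacing the queue set of a shared RSS action through
 * the generic API. Per the note above, only the queues are applied; the other
 * fields of the RSS configuration are ignored by this update.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
example_shared_rss_requeue(uint16_t port_id,
                           struct rte_flow_shared_action *handle,
                           struct rte_flow_error *error)
{
        static const uint16_t queues[] = { 4, 5, 6, 7 };
        const struct rte_flow_action_rss rss = {
                .queue = queues,
                .queue_num = RTE_DIM(queues),
        };
        const struct rte_flow_action update = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };

        return rte_flow_shared_action_update(port_id, handle, &update, error);
}
#endif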
12258
12259 /**
12260  * Update a shared action configuration in place, lock-free
12261  * (the mutex should be acquired by the caller).
12262  *
12263  * @param[in] dev
12264  *   Pointer to the Ethernet device structure.
12265  * @param[in] action
12266  *   The shared action object to be updated.
12267  * @param[in] action_conf
12268  *   Action specification used to modify *action*.
12269  *   *action_conf* should be of type correlating with type of the *action*,
12270  *   otherwise considered as invalid.
12271  * @param[out] error
12272  *   Perform verbose error reporting if not NULL. Initialized in case of
12273  *   error only.
12274  *
12275  * @return
12276  *   0 on success, otherwise negative errno value.
12277  */
12278 static int
12279 flow_dv_action_update(struct rte_eth_dev *dev,
12280                         struct rte_flow_shared_action *action,
12281                         const void *action_conf,
12282                         struct rte_flow_error *err)
12283 {
12284         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12285         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12286         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12287
12288         switch (type) {
12289         case MLX5_SHARED_ACTION_TYPE_RSS:
12290                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
12291         default:
12292                 return rte_flow_error_set(err, ENOTSUP,
12293                                           RTE_FLOW_ERROR_TYPE_ACTION,
12294                                           NULL,
12295                                           "action type update not supported");
12296         }
12297 }
12298
12299 static int
12300 flow_dv_action_query(struct rte_eth_dev *dev,
12301                      const struct rte_flow_shared_action *action, void *data,
12302                      struct rte_flow_error *error)
12303 {
12304         struct mlx5_age_param *age_param;
12305         struct rte_flow_query_age *resp;
12306         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12307         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12308         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12309
12310         switch (type) {
12311         case MLX5_SHARED_ACTION_TYPE_AGE:
12312                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
12313                 resp = data;
12314                 resp->aged = __atomic_load_n(&age_param->state,
12315                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
12316                                                                           1 : 0;
12317                 resp->sec_since_last_hit_valid = !resp->aged;
12318                 if (resp->sec_since_last_hit_valid)
12319                         resp->sec_since_last_hit = __atomic_load_n
12320                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
12321                 return 0;
12322         default:
12323                 return rte_flow_error_set(error, ENOTSUP,
12324                                           RTE_FLOW_ERROR_TYPE_ACTION,
12325                                           NULL,
12326                                           "action type query not supported");
12327         }
12328 }
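
/*
 * Usage sketch for the query path above (guarded out; MLX5_FLOW_DV_EXAMPLES
 * is hypothetical): reading the aging state of a shared AGE action. Only the
 * AGE type supports query here; other handles return ENOTSUP.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
example_shared_age_query(uint16_t port_id,
                         const struct rte_flow_shared_action *handle)
{
        struct rte_flow_query_age resp;
        struct rte_flow_error error;
        int ret;

        ret = rte_flow_shared_action_query(port_id, handle, &resp, &error);
        if (ret)
                return ret;
        if (resp.aged)
                DRV_LOG(INFO, "shared AGE action timed out");
        else if (resp.sec_since_last_hit_valid)
                DRV_LOG(INFO, "idle for %u seconds", resp.sec_since_last_hit);
        return 0;
}
#endif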
12329
12330 /**
12331  * Query a DV flow rule for its statistics via DevX.
12332  *
12333  * @param[in] dev
12334  *   Pointer to Ethernet device.
12335  * @param[in] flow
12336  *   Pointer to the sub flow.
12337  * @param[out] data
12338  *   Data retrieved by the query.
12339  * @param[out] error
12340  *   Perform verbose error reporting if not NULL.
12341  *
12342  * @return
12343  *   0 on success, a negative errno value otherwise and rte_errno is set.
12344  */
12345 static int
12346 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
12347                     void *data, struct rte_flow_error *error)
12348 {
12349         struct mlx5_priv *priv = dev->data->dev_private;
12350         struct rte_flow_query_count *qc = data;
12351
12352         if (!priv->config.devx)
12353                 return rte_flow_error_set(error, ENOTSUP,
12354                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12355                                           NULL,
12356                                           "counters are not supported");
12357         if (flow->counter) {
12358                 uint64_t pkts, bytes;
12359                 struct mlx5_flow_counter *cnt;
12360                 int err;
12361
12362                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
12363                                                  NULL);
12364                 err = _flow_dv_query_count(dev, flow->counter, &pkts,
12365                                            &bytes);
12366                 if (err)
12367                         return rte_flow_error_set(error, -err,
12368                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12369                                         NULL, "cannot read counters");
12370                 qc->hits_set = 1;
12371                 qc->bytes_set = 1;
12372                 qc->hits = pkts - cnt->hits;
12373                 qc->bytes = bytes - cnt->bytes;
12374                 if (qc->reset) {
12375                         cnt->hits = pkts;
12376                         cnt->bytes = bytes;
12377                 }
12378                 return 0;
12379         }
12380         return rte_flow_error_set(error, EINVAL,
12381                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12382                                   NULL,
12383                                   "counters are not available");
12384 }
12385
12386 /**
12387  * Query a flow rule AGE action for aging information.
12388  *
12389  * @param[in] dev
12390  *   Pointer to Ethernet device.
12391  * @param[in] flow
12392  *   Pointer to the sub flow.
12393  * @param[out] data
12394  *   Data retrieved by the query.
12395  * @param[out] error
12396  *   Perform verbose error reporting if not NULL.
12397  *
12398  * @return
12399  *   0 on success, a negative errno value otherwise and rte_errno is set.
12400  */
12401 static int
12402 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
12403                   void *data, struct rte_flow_error *error)
12404 {
12405         struct rte_flow_query_age *resp = data;
12406         struct mlx5_age_param *age_param;
12407
12408         if (flow->age) {
12409                 struct mlx5_aso_age_action *act =
12410                                      flow_aso_age_get_by_idx(dev, flow->age);
12411
12412                 age_param = &act->age_params;
12413         } else if (flow->counter) {
12414                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
12415
12416                 if (!age_param || !age_param->timeout)
12417                         return rte_flow_error_set
12418                                         (error, EINVAL,
12419                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12420                                          NULL, "cannot read age data");
12421         } else {
12422                 return rte_flow_error_set(error, EINVAL,
12423                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12424                                           NULL, "age data not available");
12425         }
12426         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
12427                                      AGE_TMOUT ? 1 : 0;
12428         resp->sec_since_last_hit_valid = !resp->aged;
12429         if (resp->sec_since_last_hit_valid)
12430                 resp->sec_since_last_hit = __atomic_load_n
12431                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
12432         return 0;
12433 }
12434
12435 /**
12436  * Query a flow.
12437  *
12438  * @see rte_flow_query()
12439  * @see rte_flow_ops
12440  */
12441 static int
12442 flow_dv_query(struct rte_eth_dev *dev,
12443               struct rte_flow *flow,
12444               const struct rte_flow_action *actions,
12445               void *data,
12446               struct rte_flow_error *error)
12447 {
12448         int ret = -EINVAL;
12449
12450         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
12451                 switch (actions->type) {
12452                 case RTE_FLOW_ACTION_TYPE_VOID:
12453                         break;
12454                 case RTE_FLOW_ACTION_TYPE_COUNT:
12455                         ret = flow_dv_query_count(dev, flow, data, error);
12456                         break;
12457                 case RTE_FLOW_ACTION_TYPE_AGE:
12458                         ret = flow_dv_query_age(dev, flow, data, error);
12459                         break;
12460                 default:
12461                         return rte_flow_error_set(error, ENOTSUP,
12462                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12463                                                   actions,
12464                                                   "action not supported");
12465                 }
12466         }
12467         return ret;
12468 }
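
/*
 * Usage sketch for the dispatcher above (guarded out; MLX5_FLOW_DV_EXAMPLES
 * is hypothetical): querying a flow's COUNT action via rte_flow_query(). The
 * action array is END-terminated because flow_dv_query() iterates until
 * RTE_FLOW_ACTION_TYPE_END.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_flow_query_count(uint16_t port_id, struct rte_flow *flow)
{
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_query_count qc = { .reset = 1 };
        struct rte_flow_error error;

        if (!rte_flow_query(port_id, flow, actions, &qc, &error) &&
            qc.hits_set)
                DRV_LOG(INFO, "hits=%" PRIu64 " bytes=%" PRIu64,
                        qc.hits, qc.bytes);
}
#endif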
12469
12470 /**
12471  * Destroy the meter table set.
12472  * Lock-free (the mutex should be acquired by the caller).
12473  *
12474  * @param[in] dev
12475  *   Pointer to Ethernet device.
12476  * @param[in] tbl
12477  *   Pointer to the meter table set.
12478  *
12479  * @return
12480  *   Always 0.
12481  */
12482 static int
12483 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
12484                         struct mlx5_meter_domains_infos *tbl)
12485 {
12486         struct mlx5_priv *priv = dev->data->dev_private;
12487         struct mlx5_meter_domains_infos *mtd =
12488                                 (struct mlx5_meter_domains_infos *)tbl;
12489
12490         if (!mtd || !priv->config.dv_flow_en)
12491                 return 0;
12492         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
12493                 claim_zero(mlx5_flow_os_destroy_flow
12494                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
12495         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
12496                 claim_zero(mlx5_flow_os_destroy_flow
12497                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
12498         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
12499                 claim_zero(mlx5_flow_os_destroy_flow
12500                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
12501         if (mtd->egress.color_matcher)
12502                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12503                            (mtd->egress.color_matcher));
12504         if (mtd->egress.any_matcher)
12505                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12506                            (mtd->egress.any_matcher));
12507         if (mtd->egress.tbl)
12508                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
12509         if (mtd->egress.sfx_tbl)
12510                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
12511         if (mtd->ingress.color_matcher)
12512                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12513                            (mtd->ingress.color_matcher));
12514         if (mtd->ingress.any_matcher)
12515                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12516                            (mtd->ingress.any_matcher));
12517         if (mtd->ingress.tbl)
12518                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
12519         if (mtd->ingress.sfx_tbl)
12520                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12521                                              mtd->ingress.sfx_tbl);
12522         if (mtd->transfer.color_matcher)
12523                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12524                            (mtd->transfer.color_matcher));
12525         if (mtd->transfer.any_matcher)
12526                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12527                            (mtd->transfer.any_matcher));
12528         if (mtd->transfer.tbl)
12529                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
12530         if (mtd->transfer.sfx_tbl)
12531                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12532                                              mtd->transfer.sfx_tbl);
12533         if (mtd->drop_actn)
12534                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
12535         mlx5_free(mtd);
12536         return 0;
12537 }
12538
12539 /* Number of meter flow actions: count and jump, or count and drop. */
12540 #define METER_ACTIONS 2
12541
12542 /**
12543  * Create the specified domain's meter table and suffix table.
12544  *
12545  * @param[in] dev
12546  *   Pointer to Ethernet device.
12547  * @param[in,out] mtb
12548  *   Pointer to DV meter table set.
12549  * @param[in] egress
12550  *   Egress table attribute.
12551  * @param[in] transfer
12552  *   Transfer table attribute.
12553  * @param[in] color_reg_c_idx
12554  *   Reg C index for color match.
12555  *
12556  * @return
12557  *   0 on success, -1 otherwise and rte_errno is set.
12558  */
12559 static int
12560 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
12561                            struct mlx5_meter_domains_infos *mtb,
12562                            uint8_t egress, uint8_t transfer,
12563                            uint32_t color_reg_c_idx)
12564 {
12565         struct mlx5_priv *priv = dev->data->dev_private;
12566         struct mlx5_dev_ctx_shared *sh = priv->sh;
12567         struct mlx5_flow_dv_match_params mask = {
12568                 .size = sizeof(mask.buf),
12569         };
12570         struct mlx5_flow_dv_match_params value = {
12571                 .size = sizeof(value.buf),
12572         };
12573         struct mlx5dv_flow_matcher_attr dv_attr = {
12574                 .type = IBV_FLOW_ATTR_NORMAL,
12575                 .priority = 0,
12576                 .match_criteria_enable = 0,
12577                 .match_mask = (void *)&mask,
12578         };
12579         void *actions[METER_ACTIONS];
12580         struct mlx5_meter_domain_info *dtb;
12581         struct rte_flow_error error;
12582         int i = 0;
12583         int ret;
12584
12585         if (transfer)
12586                 dtb = &mtb->transfer;
12587         else if (egress)
12588                 dtb = &mtb->egress;
12589         else
12590                 dtb = &mtb->ingress;
12591         /* Create the meter table with METER level. */
12592         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
12593                                             egress, transfer, false, NULL, 0,
12594                                             0, &error);
12595         if (!dtb->tbl) {
12596                 DRV_LOG(ERR, "Failed to create meter policer table.");
12597                 return -1;
12598         }
12599         /* Create the meter suffix table with SUFFIX level. */
12600         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
12601                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
12602                                             egress, transfer, false, NULL, 0,
12603                                             0, &error);
12604         if (!dtb->sfx_tbl) {
12605                 DRV_LOG(ERR, "Failed to create meter suffix table.");
12606                 return -1;
12607         }
12608         /* Create matchers, Any and Color. */
12609         dv_attr.priority = 3;
12610         dv_attr.match_criteria_enable = 0;
12611         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
12612                                                &dtb->any_matcher);
12613         if (ret) {
12614                 DRV_LOG(ERR, "Failed to create meter"
12615                              " policer default matcher.");
12616                 goto error_exit;
12617         }
12618         dv_attr.priority = 0;
12619         dv_attr.match_criteria_enable =
12620                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
12621         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
12622                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
12623         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
12624                                                &dtb->color_matcher);
12625         if (ret) {
12626                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
12627                 goto error_exit;
12628         }
12629         if (mtb->count_actns[RTE_MTR_DROPPED])
12630                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
12631         actions[i++] = mtb->drop_actn;
12632         /* Default rule: lowest priority, match any, actions: drop. */
12633         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
12634                                        actions,
12635                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
12636         if (ret) {
12637                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
12638                 goto error_exit;
12639         }
12640         return 0;
12641 error_exit:
12642         return -1;
12643 }
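
/*
 * Resulting layout of one meter domain after the setup above (sketch):
 *
 *   METER level table:
 *     priority 0: color matcher on the color REG_C
 *                 -> per-color policer rules, installed later by
 *                    flow_dv_create_policer_forward_rule()
 *     priority 3: match-any matcher
 *                 -> default rule: [drop counter] + drop
 *   SUFFIX level table:
 *     continuation of the original flow when the packet is not dropped.
 */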
12644
12645 /**
12646  * Create the needed meter and suffix tables.
12647  * Lock-free (the mutex should be acquired by the caller).
12648  *
12649  * @param[in] dev
12650  *   Pointer to Ethernet device.
12651  * @param[in] fm
12652  *   Pointer to the flow meter.
12653  *
12654  * @return
12655  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
12656  */
12657 static struct mlx5_meter_domains_infos *
12658 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
12659                        const struct mlx5_flow_meter *fm)
12660 {
12661         struct mlx5_priv *priv = dev->data->dev_private;
12662         struct mlx5_meter_domains_infos *mtb;
12663         int ret;
12664         int i;
12665
12666         if (!priv->mtr_en) {
12667                 rte_errno = ENOTSUP;
12668                 return NULL;
12669         }
12670         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
12671         if (!mtb) {
12672                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
12673                 return NULL;
12674         }
12675         /* Create meter count actions */
12676         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
12677                 struct mlx5_flow_counter *cnt;
12678                 if (!fm->policer_stats.cnt[i])
12679                         continue;
12680                 cnt = flow_dv_counter_get_by_idx(dev,
12681                       fm->policer_stats.cnt[i], NULL);
12682                 mtb->count_actns[i] = cnt->action;
12683         }
12684         /* Create drop action. */
12685         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
12686         if (ret) {
12687                 DRV_LOG(ERR, "Failed to create drop action.");
12688                 goto error_exit;
12689         }
12690         /* Egress meter table. */
12691         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
12692         if (ret) {
12693                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
12694                 goto error_exit;
12695         }
12696         /* Ingress meter table. */
12697         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
12698         if (ret) {
12699                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
12700                 goto error_exit;
12701         }
12702         /* FDB meter table. */
12703         if (priv->config.dv_esw_en) {
12704                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
12705                                                  priv->mtr_color_reg);
12706                 if (ret) {
12707                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
12708                         goto error_exit;
12709                 }
12710         }
12711         return mtb;
12712 error_exit:
12713         flow_dv_destroy_mtr_tbl(dev, mtb);
12714         return NULL;
12715 }
12716
12717 /**
12718  * Destroy domain policer rule.
12719  *
12720  * @param[in] dt
12721  *   Pointer to domain table.
12722  */
12723 static void
12724 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
12725 {
12726         int i;
12727
12728         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12729                 if (dt->policer_rules[i]) {
12730                         claim_zero(mlx5_flow_os_destroy_flow
12731                                    (dt->policer_rules[i]));
12732                         dt->policer_rules[i] = NULL;
12733                 }
12734         }
12735         if (dt->jump_actn) {
12736                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
12737                 dt->jump_actn = NULL;
12738         }
12739 }
12740
12741 /**
12742  * Destroy policer rules.
12743  *
12744  * @param[in] dev
12745  *   Pointer to Ethernet device.
12746  * @param[in] fm
12747  *   Pointer to flow meter structure.
12748  * @param[in] attr
12749  *   Pointer to flow attributes.
12750  *
12751  * @return
12752  *   Always 0.
12753  */
12754 static int
12755 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
12756                               const struct mlx5_flow_meter *fm,
12757                               const struct rte_flow_attr *attr)
12758 {
12759         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
12760
12761         if (!mtb)
12762                 return 0;
12763         if (attr->egress)
12764                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
12765         if (attr->ingress)
12766                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
12767         if (attr->transfer)
12768                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
12769         return 0;
12770 }
12771
12772 /**
12773  * Create the specified domain's meter policer rules.
12774  *
12775  * @param[in] fm
12776  *   Pointer to flow meter structure.
12777  * @param[in] dtb
12778  *   Pointer to the DV meter domain table.
12779  * @param[in] mtr_reg_c
12780  *   Color match REG_C.
12781  *
12782  * @return
12783  *   0 on success, -1 otherwise.
12784  */
12785 static int
12786 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
12787                                     struct mlx5_meter_domain_info *dtb,
12788                                     uint8_t mtr_reg_c)
12789 {
12790         struct mlx5_flow_dv_match_params matcher = {
12791                 .size = sizeof(matcher.buf),
12792         };
12793         struct mlx5_flow_dv_match_params value = {
12794                 .size = sizeof(value.buf),
12795         };
12796         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12797         void *actions[METER_ACTIONS];
12798         int i;
12799         int ret = 0;
12800
12801         /* Create jump action. */
12802         if (!dtb->jump_actn)
12803                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
12804                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
12805         if (ret) {
12806                 DRV_LOG(ERR, "Failed to create policer jump action.");
12807                 goto error;
12808         }
12809         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12810                 int j = 0;
12811
12812                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
12813                                        rte_col_2_mlx5_col(i), UINT8_MAX);
12814                 if (mtb->count_actns[i])
12815                         actions[j++] = mtb->count_actns[i];
12816                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
12817                         actions[j++] = mtb->drop_actn;
12818                 else
12819                         actions[j++] = dtb->jump_actn;
12820                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
12821                                                (void *)&value, j, actions,
12822                                                &dtb->policer_rules[i]);
12823                 if (ret) {
12824                         DRV_LOG(ERR, "Failed to create policer rule.");
12825                         goto error;
12826                 }
12827         }
12828         return 0;
12829 error:
12830         rte_errno = errno;
12831         return -1;
12832 }
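
/*
 * Shape of each per-color rule installed by the loop above (sketch):
 *
 *   match:   color REG_C == rte_col_2_mlx5_col(color)
 *   actions: [per-color counter] +
 *            drop                  (policer action is DROP for this color)
 *            jump to suffix table  (otherwise)
 */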
12833
12834 /**
12835  * Create policer rules.
12836  *
12837  * @param[in] dev
12838  *   Pointer to Ethernet device.
12839  * @param[in] fm
12840  *   Pointer to flow meter structure.
12841  * @param[in] attr
12842  *   Pointer to flow attributes.
12843  *
12844  * @return
12845  *   0 on success, -1 otherwise.
12846  */
12847 static int
12848 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
12849                              struct mlx5_flow_meter *fm,
12850                              const struct rte_flow_attr *attr)
12851 {
12852         struct mlx5_priv *priv = dev->data->dev_private;
12853         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12854         int ret;
12855
12856         if (attr->egress) {
12857                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
12858                                                 priv->mtr_color_reg);
12859                 if (ret) {
12860                         DRV_LOG(ERR, "Failed to create egress policer.");
12861                         goto error;
12862                 }
12863         }
12864         if (attr->ingress) {
12865                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
12866                                                 priv->mtr_color_reg);
12867                 if (ret) {
12868                         DRV_LOG(ERR, "Failed to create ingress policer.");
12869                         goto error;
12870                 }
12871         }
12872         if (attr->transfer) {
12873                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
12874                                                 priv->mtr_color_reg);
12875                 if (ret) {
12876                         DRV_LOG(ERR, "Failed to create transfer policer.");
12877                         goto error;
12878                 }
12879         }
12880         return 0;
12881 error:
12882         flow_dv_destroy_policer_rules(dev, fm, attr);
12883         return -1;
12884 }
12885
12886 /**
12887  * Validate batch counter support in the root table.
12888  *
12889  * Create a simple flow with an invalid counter and a drop action on the
12890  * root table to check whether a batch counter with offset is supported.
12891  *
12892  * @param[in] dev
12893  *   Pointer to rte_eth_dev structure.
12894  *
12895  * @return
12896  *   0 on success, a negative errno value otherwise and rte_errno is set.
12897  */
12898 int
12899 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
12900 {
12901         struct mlx5_priv *priv = dev->data->dev_private;
12902         struct mlx5_dev_ctx_shared *sh = priv->sh;
12903         struct mlx5_flow_dv_match_params mask = {
12904                 .size = sizeof(mask.buf),
12905         };
12906         struct mlx5_flow_dv_match_params value = {
12907                 .size = sizeof(value.buf),
12908         };
12909         struct mlx5dv_flow_matcher_attr dv_attr = {
12910                 .type = IBV_FLOW_ATTR_NORMAL,
12911                 .priority = 0,
12912                 .match_criteria_enable = 0,
12913                 .match_mask = (void *)&mask,
12914         };
12915         void *actions[2] = { 0 };
12916         struct mlx5_flow_tbl_resource *tbl = NULL;
12917         struct mlx5_devx_obj *dcs = NULL;
12918         void *matcher = NULL;
12919         void *flow = NULL;
12920         int ret = -1;
12921
12922         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
12923         if (!tbl)
12924                 goto err;
12925         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
12926         if (!dcs)
12927                 goto err;
12928         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
12929                                                     &actions[0]);
12930         if (ret)
12931                 goto err;
12932         actions[1] = priv->drop_queue.hrxq->action;
12933         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
12934         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
12935                                                &matcher);
12936         if (ret)
12937                 goto err;
12938         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
12939                                        actions, &flow);
12940 err:
12941         /*
12942          * If a batch counter with offset is not supported, the driver will
12943          * not validate the invalid offset value and flow creation should
12944          * succeed. In this case, batch counters are not supported in the
12945          * root table.
12946          * Otherwise, if flow creation fails, the counter offset is supported.
12947          */
12948         if (flow) {
12949                 DRV_LOG(INFO, "Batch counter is not supported in root "
12950                               "table. Switch to fallback mode.");
12951                 rte_errno = ENOTSUP;
12952                 ret = -rte_errno;
12953                 claim_zero(mlx5_flow_os_destroy_flow(flow));
12954         } else {
12955                 /* Check the matcher to make sure the failure was at flow creation. */
12956                 if (!matcher || errno != EINVAL)
12957                         DRV_LOG(ERR, "Unexpected error in counter offset "
12958                                      "support detection");
12959                 ret = 0;
12960         }
12961         if (actions[0])
12962                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
12963         if (matcher)
12964                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
12965         if (tbl)
12966                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12967         if (dcs)
12968                 claim_zero(mlx5_devx_cmd_destroy(dcs));
12969         return ret;
12970 }
12971
12972 /**
12973  * Query a DevX counter.
12974  *
12975  * @param[in] dev
12976  *   Pointer to the Ethernet device structure.
12977  * @param[in] counter
12978  *   Index of the flow counter.
12979  * @param[in] clear
12980  *   Set to clear the counter statistics.
12981  * @param[out] pkts
12982  *   The statistics value of packets.
12983  * @param[out] bytes
12984  *   The statistics value of bytes.
12985  *
12986  * @return
12987  *   0 on success, otherwise return -1.
12988  */
12989 static int
12990 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
12991                       uint64_t *pkts, uint64_t *bytes)
12992 {
12993         struct mlx5_priv *priv = dev->data->dev_private;
12994         struct mlx5_flow_counter *cnt;
12995         uint64_t inn_pkts, inn_bytes;
12996         int ret;
12997
12998         if (!priv->config.devx)
12999                 return -1;
13000
13001         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
13002         if (ret)
13003                 return -1;
13004         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
13005         *pkts = inn_pkts - cnt->hits;
13006         *bytes = inn_bytes - cnt->bytes;
13007         if (clear) {
13008                 cnt->hits = inn_pkts;
13009                 cnt->bytes = inn_bytes;
13010         }
13011         return 0;
13012 }
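
/*
 * Worked example of the delta semantics above (illustrative numbers): if the
 * DevX counter reads inn_pkts == 1000 while the saved baseline cnt->hits is
 * 400, the query reports 600 packets; with clear == true the baseline then
 * advances to 1000, so an immediate re-query would report 0.
 */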
13013
13014 /**
13015  * Get aged-out flows.
13016  *
13017  * @param[in] dev
13018  *   Pointer to the Ethernet device structure.
13019  * @param[in] context
13020  *   The address of an array of pointers to the aged-out flow contexts.
13021  * @param[in] nb_contexts
13022  *   The length of the context array.
13023  * @param[out] error
13024  *   Perform verbose error reporting if not NULL. Initialized in case of
13025  *   error only.
13026  *
13027  * @return
13028  *   The number of contexts retrieved on success, otherwise a negative
13029  *   errno value.
13030  *   If nb_contexts is 0, return the total number of aged contexts.
13031  *   If nb_contexts is not 0, return the number of aged flows reported
13032  *   in the context array.
13033  */
13034 static int
13035 flow_get_aged_flows(struct rte_eth_dev *dev,
13036                     void **context,
13037                     uint32_t nb_contexts,
13038                     struct rte_flow_error *error)
13039 {
13040         struct mlx5_priv *priv = dev->data->dev_private;
13041         struct mlx5_age_info *age_info;
13042         struct mlx5_age_param *age_param;
13043         struct mlx5_flow_counter *counter;
13044         struct mlx5_aso_age_action *act;
13045         int nb_flows = 0;
13046
13047         if (nb_contexts && !context)
13048                 return rte_flow_error_set(error, EINVAL,
13049                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13050                                           NULL, "empty context");
13051         age_info = GET_PORT_AGE_INFO(priv);
13052         rte_spinlock_lock(&age_info->aged_sl);
13053         LIST_FOREACH(act, &age_info->aged_aso, next) {
13054                 nb_flows++;
13055                 if (nb_contexts) {
13056                         context[nb_flows - 1] =
13057                                                 act->age_params.context;
13058                         if (!(--nb_contexts))
13059                                 break;
13060                 }
13061         }
13062         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
13063                 nb_flows++;
13064                 if (nb_contexts) {
13065                         age_param = MLX5_CNT_TO_AGE(counter);
13066                         context[nb_flows - 1] = age_param->context;
13067                         if (!(--nb_contexts))
13068                                 break;
13069                 }
13070         }
13071         rte_spinlock_unlock(&age_info->aged_sl);
13072         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
13073         return nb_flows;
13074 }
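
/*
 * Usage sketch for the aging query above (guarded out; MLX5_FLOW_DV_EXAMPLES
 * is hypothetical): first call with nb_contexts == 0 to learn the count, then
 * collect the contexts through the public rte_flow_get_aged_flows() entry
 * point. mlx5_malloc() is used only to stay consistent with this file; an
 * application would use its own allocator.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_drain_aged_flows(uint16_t port_id)
{
        struct rte_flow_error error;
        int total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);

        if (total > 0) {
                void **ctx = mlx5_malloc(MLX5_MEM_ZERO,
                                         total * sizeof(*ctx), 0,
                                         SOCKET_ID_ANY);

                if (ctx) {
                        int n = rte_flow_get_aged_flows(port_id, ctx,
                                                        (uint32_t)total,
                                                        &error);

                        /* ctx[0..n-1] hold the AGE action contexts. */
                        RTE_SET_USED(n);
                        mlx5_free(ctx);
                }
        }
}
#endif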
13075
13076 /*
13077  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
13078  */
13079 static uint32_t
13080 flow_dv_counter_allocate(struct rte_eth_dev *dev)
13081 {
13082         return flow_dv_counter_alloc(dev, 0);
13083 }
13084
13085 /**
13086  * Validate shared action.
13087  * Dispatcher for action type specific validation.
13088  *
13089  * @param[in] dev
13090  *   Pointer to the Ethernet device structure.
13091  * @param[in] conf
13092  *   Shared action configuration.
13093  * @param[in] action
13094  *   The shared action object to validate.
13095  * @param[out] error
13096  *   Perform verbose error reporting if not NULL. Initialized in case of
13097  *   error only.
13098  *
13099  * @return
13100  *   0 on success, otherwise negative errno value.
13101  */
13102 static int
13103 flow_dv_action_validate(struct rte_eth_dev *dev,
13104                         const struct rte_flow_shared_action_conf *conf,
13105                         const struct rte_flow_action *action,
13106                         struct rte_flow_error *err)
13107 {
13108         struct mlx5_priv *priv = dev->data->dev_private;
13109
13110         RTE_SET_USED(conf);
13111         switch (action->type) {
13112         case RTE_FLOW_ACTION_TYPE_RSS:
13113                 return mlx5_validate_action_rss(dev, action, err);
13114         case RTE_FLOW_ACTION_TYPE_AGE:
13115                 if (!priv->sh->aso_age_mng)
13116                         return rte_flow_error_set(err, ENOTSUP,
13117                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13118                                                 NULL,
13119                                              "shared age action not supported");
13120                 return flow_dv_validate_action_age(0, action, dev, err);
13121         default:
13122                 return rte_flow_error_set(err, ENOTSUP,
13123                                           RTE_FLOW_ERROR_TYPE_ACTION,
13124                                           NULL,
13125                                           "action type not supported");
13126         }
13127 }
13128
13129 static int
13130 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
13131 {
13132         struct mlx5_priv *priv = dev->data->dev_private;
13133         int ret = 0;
13134
13135         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
13136                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
13137                                                 flags);
13138                 if (ret != 0)
13139                         return ret;
13140         }
13141         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
13142                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
13143                 if (ret != 0)
13144                         return ret;
13145         }
13146         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
13147                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
13148                 if (ret != 0)
13149                         return ret;
13150         }
13151         return 0;
13152 }
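
/*
 * Usage sketch for the domain synchronization above (guarded out;
 * MLX5_FLOW_DV_EXAMPLES is hypothetical): the public entry point is
 * rte_pmd_mlx5_sync_flow() from rte_pmd_mlx5.h, e.g. to make all Rx and
 * FDB rule updates observable by the hardware.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
example_sync_rx_and_fdb(uint16_t port_id)
{
        return rte_pmd_mlx5_sync_flow(port_id,
                                      MLX5_DOMAIN_BIT_NIC_RX |
                                      MLX5_DOMAIN_BIT_FDB);
}
#endif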
13153
13154 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
13155         .validate = flow_dv_validate,
13156         .prepare = flow_dv_prepare,
13157         .translate = flow_dv_translate,
13158         .apply = flow_dv_apply,
13159         .remove = flow_dv_remove,
13160         .destroy = flow_dv_destroy,
13161         .query = flow_dv_query,
13162         .create_mtr_tbls = flow_dv_create_mtr_tbl,
13163         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
13164         .create_policer_rules = flow_dv_create_policer_rules,
13165         .destroy_policer_rules = flow_dv_destroy_policer_rules,
13166         .counter_alloc = flow_dv_counter_allocate,
13167         .counter_free = flow_dv_counter_free,
13168         .counter_query = flow_dv_counter_query,
13169         .get_aged_flows = flow_get_aged_flows,
13170         .action_validate = flow_dv_action_validate,
13171         .action_create = flow_dv_action_create,
13172         .action_destroy = flow_dv_action_destroy,
13173         .action_update = flow_dv_action_update,
13174         .action_query = flow_dv_action_query,
13175         .sync_domain = flow_dv_sync_domain,
13176 };
13177
13178 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
13179