4758b9f2cb6c94b7ed47709ea559691a6f3a4e63
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24 #include <rte_mtr.h>
25 #include <rte_mtr_driver.h>
26
27 #include <mlx5_glue.h>
28 #include <mlx5_devx_cmds.h>
29 #include <mlx5_prm.h>
30 #include <mlx5_malloc.h>
31
32 #include "mlx5_defs.h"
33 #include "mlx5.h"
34 #include "mlx5_common_os.h"
35 #include "mlx5_flow.h"
36 #include "mlx5_flow_os.h"
37 #include "mlx5_rx.h"
38 #include "mlx5_tx.h"
39 #include "rte_pmd_mlx5.h"
40
41 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
42
43 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
44 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
45 #endif
46
47 #ifndef HAVE_MLX5DV_DR_ESWITCH
48 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
49 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
50 #endif
51 #endif
52
53 #ifndef HAVE_MLX5DV_DR
54 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
55 #endif
56
57 /* VLAN header definitions */
58 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
59 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
60 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
61 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
62 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
63
/*
 * L3/L4 attributes deduced from the flow items. The anonymous bit-field
 * struct is aliased by 'attr' so all flags can be reset in one assignment.
 */
union flow_dv_attr {
        struct {
                uint32_t valid:1;    /* Set once the attributes were deduced. */
                uint32_t ipv4:1;     /* Flow matches (outer) IPv4. */
                uint32_t ipv6:1;     /* Flow matches (outer) IPv6. */
                uint32_t tcp:1;      /* Flow matches (outer) TCP. */
                uint32_t udp:1;      /* Flow matches (outer) UDP. */
                uint32_t reserved:27;
        };
        uint32_t attr; /* All flags viewed as a single word. */
};
75
/*
 * Forward declarations of resource release helpers that are defined later
 * in this file but referenced by earlier code paths.
 */
static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);
94 /**
95  * Initialize flow attributes structure according to flow items' types.
96  *
97  * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
98  * mode. For tunnel mode, the items to be modified are the outermost ones.
99  *
100  * @param[in] item
101  *   Pointer to item specification.
102  * @param[out] attr
103  *   Pointer to flow attributes structure.
104  * @param[in] dev_flow
105  *   Pointer to the sub flow.
106  * @param[in] tunnel_decap
107  *   Whether action is after tunnel decapsulation.
108  */
109 static void
110 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
111                   struct mlx5_flow *dev_flow, bool tunnel_decap)
112 {
113         uint64_t layers = dev_flow->handle->layers;
114
115         /*
116          * If layers is already initialized, it means this dev_flow is the
117          * suffix flow, the layers flags is set by the prefix flow. Need to
118          * use the layer flags from prefix flow as the suffix flow may not
119          * have the user defined items as the flow is split.
120          */
121         if (layers) {
122                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
123                         attr->ipv4 = 1;
124                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
125                         attr->ipv6 = 1;
126                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
127                         attr->tcp = 1;
128                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
129                         attr->udp = 1;
130                 attr->valid = 1;
131                 return;
132         }
133         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
134                 uint8_t next_protocol = 0xff;
135                 switch (item->type) {
136                 case RTE_FLOW_ITEM_TYPE_GRE:
137                 case RTE_FLOW_ITEM_TYPE_NVGRE:
138                 case RTE_FLOW_ITEM_TYPE_VXLAN:
139                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
140                 case RTE_FLOW_ITEM_TYPE_GENEVE:
141                 case RTE_FLOW_ITEM_TYPE_MPLS:
142                         if (tunnel_decap)
143                                 attr->attr = 0;
144                         break;
145                 case RTE_FLOW_ITEM_TYPE_IPV4:
146                         if (!attr->ipv6)
147                                 attr->ipv4 = 1;
148                         if (item->mask != NULL &&
149                             ((const struct rte_flow_item_ipv4 *)
150                             item->mask)->hdr.next_proto_id)
151                                 next_protocol =
152                                     ((const struct rte_flow_item_ipv4 *)
153                                       (item->spec))->hdr.next_proto_id &
154                                     ((const struct rte_flow_item_ipv4 *)
155                                       (item->mask))->hdr.next_proto_id;
156                         if ((next_protocol == IPPROTO_IPIP ||
157                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
158                                 attr->attr = 0;
159                         break;
160                 case RTE_FLOW_ITEM_TYPE_IPV6:
161                         if (!attr->ipv4)
162                                 attr->ipv6 = 1;
163                         if (item->mask != NULL &&
164                             ((const struct rte_flow_item_ipv6 *)
165                             item->mask)->hdr.proto)
166                                 next_protocol =
167                                     ((const struct rte_flow_item_ipv6 *)
168                                       (item->spec))->hdr.proto &
169                                     ((const struct rte_flow_item_ipv6 *)
170                                       (item->mask))->hdr.proto;
171                         if ((next_protocol == IPPROTO_IPIP ||
172                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
173                                 attr->attr = 0;
174                         break;
175                 case RTE_FLOW_ITEM_TYPE_UDP:
176                         if (!attr->tcp)
177                                 attr->udp = 1;
178                         break;
179                 case RTE_FLOW_ITEM_TYPE_TCP:
180                         if (!attr->udp)
181                                 attr->tcp = 1;
182                         break;
183                 default:
184                         break;
185                 }
186         }
187         attr->valid = 1;
188 }
189
190 /**
191  * Convert rte_mtr_color to mlx5 color.
192  *
193  * @param[in] rcol
194  *   rte_mtr_color.
195  *
196  * @return
197  *   mlx5 color.
198  */
199 static int
200 rte_col_2_mlx5_col(enum rte_color rcol)
201 {
202         switch (rcol) {
203         case RTE_COLOR_GREEN:
204                 return MLX5_FLOW_COLOR_GREEN;
205         case RTE_COLOR_YELLOW:
206                 return MLX5_FLOW_COLOR_YELLOW;
207         case RTE_COLOR_RED:
208                 return MLX5_FLOW_COLOR_RED;
209         default:
210                 break;
211         }
212         return MLX5_FLOW_COLOR_UNDEFINED;
213 }
214
/*
 * Descriptor of one header field for modify-header actions. Arrays of
 * these descriptors are terminated by an all-zero (size == 0) entry.
 */
struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};
220
/*
 * Ethernet header fields: DMAC in bytes 0-5, SMAC in bytes 6-11, each
 * split into a 32-bit and a 16-bit chunk to match the hardware
 * modification fields. Zero entry terminates the table.
 */
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};
228
/* Outermost VLAN VID field. Unlike the other tables, the size here is
 * expressed in bits, matching how the VID action command is built.
 */
struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};
234
/*
 * IPv4 header fields: DSCP bits (within byte 1), TTL (byte 8), source
 * address (byte 12) and destination address (byte 16).
 */
struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};
242
/*
 * IPv6 header fields: DSCP bits taken from byte 0 (the mask supplied by
 * callers selects the relevant bits), hop limit (byte 7), then the 128-bit
 * source (bytes 8-23) and destination (bytes 24-39) addresses split into
 * 32-bit chunks for the hardware modification fields.
 */
struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};
256
/* UDP header fields: source port (byte 0) and destination port (byte 2). */
struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};
262
/*
 * TCP header fields: source port (byte 0), destination port (byte 2),
 * sequence number (byte 4) and acknowledgment number (byte 8).
 */
struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};
270
271 static const struct rte_flow_item *
272 mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
273 {
274         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
275                 switch (item->type) {
276                 default:
277                         break;
278                 case RTE_FLOW_ITEM_TYPE_VXLAN:
279                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
280                 case RTE_FLOW_ITEM_TYPE_GRE:
281                 case RTE_FLOW_ITEM_TYPE_MPLS:
282                 case RTE_FLOW_ITEM_TYPE_NVGRE:
283                 case RTE_FLOW_ITEM_TYPE_GENEVE:
284                         return item;
285                 case RTE_FLOW_ITEM_TYPE_IPV4:
286                 case RTE_FLOW_ITEM_TYPE_IPV6:
287                         if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
288                             item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
289                                 return item;
290                         break;
291                 }
292         }
293         return NULL;
294 }
295
296 static void
297 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
298                           uint8_t next_protocol, uint64_t *item_flags,
299                           int *tunnel)
300 {
301         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
302                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
303         if (next_protocol == IPPROTO_IPIP) {
304                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
305                 *tunnel = 1;
306         }
307         if (next_protocol == IPPROTO_IPV6) {
308                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
309                 *tunnel = 1;
310         }
311 }
312
313 /* Update VLAN's VID/PCP based on input rte_flow_action.
314  *
315  * @param[in] action
316  *   Pointer to struct rte_flow_action.
317  * @param[out] vlan
318  *   Pointer to struct rte_vlan_hdr.
319  */
320 static void
321 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
322                          struct rte_vlan_hdr *vlan)
323 {
324         uint16_t vlan_tci;
325         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
326                 vlan_tci =
327                     ((const struct rte_flow_action_of_set_vlan_pcp *)
328                                                action->conf)->vlan_pcp;
329                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
330                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
331                 vlan->vlan_tci |= vlan_tci;
332         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
333                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
334                 vlan->vlan_tci |= rte_be_to_cpu_16
335                     (((const struct rte_flow_action_of_set_vlan_vid *)
336                                              action->conf)->vlan_vid);
337         }
338 }
339
/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   converted field in host endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        /*
         * Assemble the big-endian field byte by byte, which also avoids
         * any unaligned wide loads.
         */
        switch (size) {
        case 1:
                return data[0];
        case 2:
                return ((uint32_t)data[0] << 8) | data[1];
        case 3:
                return ((uint32_t)data[0] << 16) |
                       ((uint32_t)data[1] << 8) | data[2];
        case 4:
                return ((uint32_t)data[0] << 24) |
                       ((uint32_t)data[1] << 16) |
                       ((uint32_t)data[2] << 8) | data[3];
        default:
                MLX5_ASSERT(false);
                return 0;
        }
}
378
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented as in big-endian format either.
         * Mask must be always present, it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                /* A zero mask means this field is not to be modified. */
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                /* NOTE(review): length 0 appears to encode the full 32-bit
                 * width for the hardware command — verify against the PRM.
                 */
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        /* Negative dcopy offset mirrors the source offset. */
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        if ((size_b > dcopy->size * CHAR_BIT) && dcopy->size) {
                                actions[i].length = dcopy->size * CHAR_BIT;
                                field->offset += dcopy->size;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in a source filed to fill a
                         * destination field. Switch to the next source.
                         */
                        if (dcopy->size > field->size &&
                            (size_b == field->size * CHAR_BIT)) {
                                actions[i].length = field->size * CHAR_BIT;
                                dcopy->offset += field->size * CHAR_BIT;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        /* No command was emitted: every field mask was zero. */
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
504
505 /**
506  * Convert modify-header set IPv4 address action to DV specification.
507  *
508  * @param[in,out] resource
509  *   Pointer to the modify-header resource.
510  * @param[in] action
511  *   Pointer to action specification.
512  * @param[out] error
513  *   Pointer to the error structure.
514  *
515  * @return
516  *   0 on success, a negative errno value otherwise and rte_errno is set.
517  */
518 static int
519 flow_dv_convert_action_modify_ipv4
520                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
521                          const struct rte_flow_action *action,
522                          struct rte_flow_error *error)
523 {
524         const struct rte_flow_action_set_ipv4 *conf =
525                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
526         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
527         struct rte_flow_item_ipv4 ipv4;
528         struct rte_flow_item_ipv4 ipv4_mask;
529
530         memset(&ipv4, 0, sizeof(ipv4));
531         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
532         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
533                 ipv4.hdr.src_addr = conf->ipv4_addr;
534                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
535         } else {
536                 ipv4.hdr.dst_addr = conf->ipv4_addr;
537                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
538         }
539         item.spec = &ipv4;
540         item.mask = &ipv4_mask;
541         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
542                                              MLX5_MODIFICATION_TYPE_SET, error);
543 }
544
545 /**
546  * Convert modify-header set IPv6 address action to DV specification.
547  *
548  * @param[in,out] resource
549  *   Pointer to the modify-header resource.
550  * @param[in] action
551  *   Pointer to action specification.
552  * @param[out] error
553  *   Pointer to the error structure.
554  *
555  * @return
556  *   0 on success, a negative errno value otherwise and rte_errno is set.
557  */
558 static int
559 flow_dv_convert_action_modify_ipv6
560                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
561                          const struct rte_flow_action *action,
562                          struct rte_flow_error *error)
563 {
564         const struct rte_flow_action_set_ipv6 *conf =
565                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
566         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
567         struct rte_flow_item_ipv6 ipv6;
568         struct rte_flow_item_ipv6 ipv6_mask;
569
570         memset(&ipv6, 0, sizeof(ipv6));
571         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
572         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
573                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
574                        sizeof(ipv6.hdr.src_addr));
575                 memcpy(&ipv6_mask.hdr.src_addr,
576                        &rte_flow_item_ipv6_mask.hdr.src_addr,
577                        sizeof(ipv6.hdr.src_addr));
578         } else {
579                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
580                        sizeof(ipv6.hdr.dst_addr));
581                 memcpy(&ipv6_mask.hdr.dst_addr,
582                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
583                        sizeof(ipv6.hdr.dst_addr));
584         }
585         item.spec = &ipv6;
586         item.mask = &ipv6_mask;
587         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
588                                              MLX5_MODIFICATION_TYPE_SET, error);
589 }
590
591 /**
592  * Convert modify-header set MAC address action to DV specification.
593  *
594  * @param[in,out] resource
595  *   Pointer to the modify-header resource.
596  * @param[in] action
597  *   Pointer to action specification.
598  * @param[out] error
599  *   Pointer to the error structure.
600  *
601  * @return
602  *   0 on success, a negative errno value otherwise and rte_errno is set.
603  */
604 static int
605 flow_dv_convert_action_modify_mac
606                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
607                          const struct rte_flow_action *action,
608                          struct rte_flow_error *error)
609 {
610         const struct rte_flow_action_set_mac *conf =
611                 (const struct rte_flow_action_set_mac *)(action->conf);
612         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
613         struct rte_flow_item_eth eth;
614         struct rte_flow_item_eth eth_mask;
615
616         memset(&eth, 0, sizeof(eth));
617         memset(&eth_mask, 0, sizeof(eth_mask));
618         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
619                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
620                        sizeof(eth.src.addr_bytes));
621                 memcpy(&eth_mask.src.addr_bytes,
622                        &rte_flow_item_eth_mask.src.addr_bytes,
623                        sizeof(eth_mask.src.addr_bytes));
624         } else {
625                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
626                        sizeof(eth.dst.addr_bytes));
627                 memcpy(&eth_mask.dst.addr_bytes,
628                        &rte_flow_item_eth_mask.dst.addr_bytes,
629                        sizeof(eth_mask.dst.addr_bytes));
630         }
631         item.spec = &eth;
632         item.mask = &eth_mask;
633         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
634                                              MLX5_MODIFICATION_TYPE_SET, error);
635 }
636
/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        /* Build the command directly; this table's size is in bits. */
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        /*
         * NOTE(review): data1 is not passed through rte_cpu_to_be_32 here,
         * unlike in flow_dv_convert_modify_action() — the VID from the
         * action is already big-endian and only shifted into the upper
         * 16 bits; verify against the command layout.
         */
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}
678
679 /**
680  * Convert modify-header set TP action to DV specification.
681  *
682  * @param[in,out] resource
683  *   Pointer to the modify-header resource.
684  * @param[in] action
685  *   Pointer to action specification.
686  * @param[in] items
687  *   Pointer to rte_flow_item objects list.
688  * @param[in] attr
689  *   Pointer to flow attributes structure.
690  * @param[in] dev_flow
691  *   Pointer to the sub flow.
692  * @param[in] tunnel_decap
693  *   Whether action is after tunnel decapsulation.
694  * @param[out] error
695  *   Pointer to the error structure.
696  *
697  * @return
698  *   0 on success, a negative errno value otherwise and rte_errno is set.
699  */
700 static int
701 flow_dv_convert_action_modify_tp
702                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
703                          const struct rte_flow_action *action,
704                          const struct rte_flow_item *items,
705                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
706                          bool tunnel_decap, struct rte_flow_error *error)
707 {
708         const struct rte_flow_action_set_tp *conf =
709                 (const struct rte_flow_action_set_tp *)(action->conf);
710         struct rte_flow_item item;
711         struct rte_flow_item_udp udp;
712         struct rte_flow_item_udp udp_mask;
713         struct rte_flow_item_tcp tcp;
714         struct rte_flow_item_tcp tcp_mask;
715         struct field_modify_info *field;
716
717         if (!attr->valid)
718                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
719         if (attr->udp) {
720                 memset(&udp, 0, sizeof(udp));
721                 memset(&udp_mask, 0, sizeof(udp_mask));
722                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
723                         udp.hdr.src_port = conf->port;
724                         udp_mask.hdr.src_port =
725                                         rte_flow_item_udp_mask.hdr.src_port;
726                 } else {
727                         udp.hdr.dst_port = conf->port;
728                         udp_mask.hdr.dst_port =
729                                         rte_flow_item_udp_mask.hdr.dst_port;
730                 }
731                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
732                 item.spec = &udp;
733                 item.mask = &udp_mask;
734                 field = modify_udp;
735         } else {
736                 MLX5_ASSERT(attr->tcp);
737                 memset(&tcp, 0, sizeof(tcp));
738                 memset(&tcp_mask, 0, sizeof(tcp_mask));
739                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
740                         tcp.hdr.src_port = conf->port;
741                         tcp_mask.hdr.src_port =
742                                         rte_flow_item_tcp_mask.hdr.src_port;
743                 } else {
744                         tcp.hdr.dst_port = conf->port;
745                         tcp_mask.hdr.dst_port =
746                                         rte_flow_item_tcp_mask.hdr.dst_port;
747                 }
748                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
749                 item.spec = &tcp;
750                 item.mask = &tcp_mask;
751                 field = modify_tcp;
752         }
753         return flow_dv_convert_modify_action(&item, field, NULL, resource,
754                                              MLX5_MODIFICATION_TYPE_SET, error);
755 }
756
757 /**
758  * Convert modify-header set TTL action to DV specification.
759  *
760  * @param[in,out] resource
761  *   Pointer to the modify-header resource.
762  * @param[in] action
763  *   Pointer to action specification.
764  * @param[in] items
765  *   Pointer to rte_flow_item objects list.
766  * @param[in] attr
767  *   Pointer to flow attributes structure.
768  * @param[in] dev_flow
769  *   Pointer to the sub flow.
770  * @param[in] tunnel_decap
771  *   Whether action is after tunnel decapsulation.
772  * @param[out] error
773  *   Pointer to the error structure.
774  *
775  * @return
776  *   0 on success, a negative errno value otherwise and rte_errno is set.
777  */
778 static int
779 flow_dv_convert_action_modify_ttl
780                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
781                          const struct rte_flow_action *action,
782                          const struct rte_flow_item *items,
783                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
784                          bool tunnel_decap, struct rte_flow_error *error)
785 {
786         const struct rte_flow_action_set_ttl *conf =
787                 (const struct rte_flow_action_set_ttl *)(action->conf);
788         struct rte_flow_item item;
789         struct rte_flow_item_ipv4 ipv4;
790         struct rte_flow_item_ipv4 ipv4_mask;
791         struct rte_flow_item_ipv6 ipv6;
792         struct rte_flow_item_ipv6 ipv6_mask;
793         struct field_modify_info *field;
794
795         if (!attr->valid)
796                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
797         if (attr->ipv4) {
798                 memset(&ipv4, 0, sizeof(ipv4));
799                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
800                 ipv4.hdr.time_to_live = conf->ttl_value;
801                 ipv4_mask.hdr.time_to_live = 0xFF;
802                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
803                 item.spec = &ipv4;
804                 item.mask = &ipv4_mask;
805                 field = modify_ipv4;
806         } else {
807                 MLX5_ASSERT(attr->ipv6);
808                 memset(&ipv6, 0, sizeof(ipv6));
809                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
810                 ipv6.hdr.hop_limits = conf->ttl_value;
811                 ipv6_mask.hdr.hop_limits = 0xFF;
812                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
813                 item.spec = &ipv6;
814                 item.mask = &ipv6_mask;
815                 field = modify_ipv6;
816         }
817         return flow_dv_convert_modify_action(&item, field, NULL, resource,
818                                              MLX5_MODIFICATION_TYPE_SET, error);
819 }
820
821 /**
822  * Convert modify-header decrement TTL action to DV specification.
823  *
824  * @param[in,out] resource
825  *   Pointer to the modify-header resource.
826  * @param[in] action
827  *   Pointer to action specification.
828  * @param[in] items
829  *   Pointer to rte_flow_item objects list.
830  * @param[in] attr
831  *   Pointer to flow attributes structure.
832  * @param[in] dev_flow
833  *   Pointer to the sub flow.
834  * @param[in] tunnel_decap
835  *   Whether action is after tunnel decapsulation.
836  * @param[out] error
837  *   Pointer to the error structure.
838  *
839  * @return
840  *   0 on success, a negative errno value otherwise and rte_errno is set.
841  */
842 static int
843 flow_dv_convert_action_modify_dec_ttl
844                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
845                          const struct rte_flow_item *items,
846                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
847                          bool tunnel_decap, struct rte_flow_error *error)
848 {
849         struct rte_flow_item item;
850         struct rte_flow_item_ipv4 ipv4;
851         struct rte_flow_item_ipv4 ipv4_mask;
852         struct rte_flow_item_ipv6 ipv6;
853         struct rte_flow_item_ipv6 ipv6_mask;
854         struct field_modify_info *field;
855
856         if (!attr->valid)
857                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
858         if (attr->ipv4) {
859                 memset(&ipv4, 0, sizeof(ipv4));
860                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
861                 ipv4.hdr.time_to_live = 0xFF;
862                 ipv4_mask.hdr.time_to_live = 0xFF;
863                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
864                 item.spec = &ipv4;
865                 item.mask = &ipv4_mask;
866                 field = modify_ipv4;
867         } else {
868                 MLX5_ASSERT(attr->ipv6);
869                 memset(&ipv6, 0, sizeof(ipv6));
870                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
871                 ipv6.hdr.hop_limits = 0xFF;
872                 ipv6_mask.hdr.hop_limits = 0xFF;
873                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
874                 item.spec = &ipv6;
875                 item.mask = &ipv6_mask;
876                 field = modify_ipv6;
877         }
878         return flow_dv_convert_modify_action(&item, field, NULL, resource,
879                                              MLX5_MODIFICATION_TYPE_ADD, error);
880 }
881
882 /**
883  * Convert modify-header increment/decrement TCP Sequence number
884  * to DV specification.
885  *
886  * @param[in,out] resource
887  *   Pointer to the modify-header resource.
888  * @param[in] action
889  *   Pointer to action specification.
890  * @param[out] error
891  *   Pointer to the error structure.
892  *
893  * @return
894  *   0 on success, a negative errno value otherwise and rte_errno is set.
895  */
896 static int
897 flow_dv_convert_action_modify_tcp_seq
898                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
899                          const struct rte_flow_action *action,
900                          struct rte_flow_error *error)
901 {
902         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
903         uint64_t value = rte_be_to_cpu_32(*conf);
904         struct rte_flow_item item;
905         struct rte_flow_item_tcp tcp;
906         struct rte_flow_item_tcp tcp_mask;
907
908         memset(&tcp, 0, sizeof(tcp));
909         memset(&tcp_mask, 0, sizeof(tcp_mask));
910         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
911                 /*
912                  * The HW has no decrement operation, only increment operation.
913                  * To simulate decrement X from Y using increment operation
914                  * we need to add UINT32_MAX X times to Y.
915                  * Each adding of UINT32_MAX decrements Y by 1.
916                  */
917                 value *= UINT32_MAX;
918         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
919         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
920         item.type = RTE_FLOW_ITEM_TYPE_TCP;
921         item.spec = &tcp;
922         item.mask = &tcp_mask;
923         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
924                                              MLX5_MODIFICATION_TYPE_ADD, error);
925 }
926
927 /**
928  * Convert modify-header increment/decrement TCP Acknowledgment number
929  * to DV specification.
930  *
931  * @param[in,out] resource
932  *   Pointer to the modify-header resource.
933  * @param[in] action
934  *   Pointer to action specification.
935  * @param[out] error
936  *   Pointer to the error structure.
937  *
938  * @return
939  *   0 on success, a negative errno value otherwise and rte_errno is set.
940  */
941 static int
942 flow_dv_convert_action_modify_tcp_ack
943                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
944                          const struct rte_flow_action *action,
945                          struct rte_flow_error *error)
946 {
947         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
948         uint64_t value = rte_be_to_cpu_32(*conf);
949         struct rte_flow_item item;
950         struct rte_flow_item_tcp tcp;
951         struct rte_flow_item_tcp tcp_mask;
952
953         memset(&tcp, 0, sizeof(tcp));
954         memset(&tcp_mask, 0, sizeof(tcp_mask));
955         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
956                 /*
957                  * The HW has no decrement operation, only increment operation.
958                  * To simulate decrement X from Y using increment operation
959                  * we need to add UINT32_MAX X times to Y.
960                  * Each adding of UINT32_MAX decrements Y by 1.
961                  */
962                 value *= UINT32_MAX;
963         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
964         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
965         item.type = RTE_FLOW_ITEM_TYPE_TCP;
966         item.spec = &tcp;
967         item.mask = &tcp_mask;
968         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
969                                              MLX5_MODIFICATION_TYPE_ADD, error);
970 }
971
/*
 * Translation table from the driver's metadata register identifiers
 * (enum modify_reg) to the modify-header field codes consumed by the
 * device.  Indexed directly by the register id; REG_NON maps to the
 * "no field" sentinel.
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
985
986 /**
987  * Convert register set to DV specification.
988  *
989  * @param[in,out] resource
990  *   Pointer to the modify-header resource.
991  * @param[in] action
992  *   Pointer to action specification.
993  * @param[out] error
994  *   Pointer to the error structure.
995  *
996  * @return
997  *   0 on success, a negative errno value otherwise and rte_errno is set.
998  */
999 static int
1000 flow_dv_convert_action_set_reg
1001                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1002                          const struct rte_flow_action *action,
1003                          struct rte_flow_error *error)
1004 {
1005         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
1006         struct mlx5_modification_cmd *actions = resource->actions;
1007         uint32_t i = resource->actions_num;
1008
1009         if (i >= MLX5_MAX_MODIFY_NUM)
1010                 return rte_flow_error_set(error, EINVAL,
1011                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1012                                           "too many items to modify");
1013         MLX5_ASSERT(conf->id != REG_NON);
1014         MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
1015         actions[i] = (struct mlx5_modification_cmd) {
1016                 .action_type = MLX5_MODIFICATION_TYPE_SET,
1017                 .field = reg_to_field[conf->id],
1018                 .offset = conf->offset,
1019                 .length = conf->length,
1020         };
1021         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1022         actions[i].data1 = rte_cpu_to_be_32(conf->data);
1023         ++i;
1024         resource->actions_num = i;
1025         return 0;
1026 }
1027
1028 /**
1029  * Convert SET_TAG action to DV specification.
1030  *
1031  * @param[in] dev
1032  *   Pointer to the rte_eth_dev structure.
1033  * @param[in,out] resource
1034  *   Pointer to the modify-header resource.
1035  * @param[in] conf
1036  *   Pointer to action specification.
1037  * @param[out] error
1038  *   Pointer to the error structure.
1039  *
1040  * @return
1041  *   0 on success, a negative errno value otherwise and rte_errno is set.
1042  */
1043 static int
1044 flow_dv_convert_action_set_tag
1045                         (struct rte_eth_dev *dev,
1046                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1047                          const struct rte_flow_action_set_tag *conf,
1048                          struct rte_flow_error *error)
1049 {
1050         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1051         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1052         struct rte_flow_item item = {
1053                 .spec = &data,
1054                 .mask = &mask,
1055         };
1056         struct field_modify_info reg_c_x[] = {
1057                 [1] = {0, 0, 0},
1058         };
1059         enum mlx5_modification_field reg_type;
1060         int ret;
1061
1062         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1063         if (ret < 0)
1064                 return ret;
1065         MLX5_ASSERT(ret != REG_NON);
1066         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1067         reg_type = reg_to_field[ret];
1068         MLX5_ASSERT(reg_type > 0);
1069         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1070         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1071                                              MLX5_MODIFICATION_TYPE_SET, error);
1072 }
1073
1074 /**
1075  * Convert internal COPY_REG action to DV specification.
1076  *
1077  * @param[in] dev
1078  *   Pointer to the rte_eth_dev structure.
1079  * @param[in,out] res
1080  *   Pointer to the modify-header resource.
1081  * @param[in] action
1082  *   Pointer to action specification.
1083  * @param[out] error
1084  *   Pointer to the error structure.
1085  *
1086  * @return
1087  *   0 on success, a negative errno value otherwise and rte_errno is set.
1088  */
1089 static int
1090 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
1091                                  struct mlx5_flow_dv_modify_hdr_resource *res,
1092                                  const struct rte_flow_action *action,
1093                                  struct rte_flow_error *error)
1094 {
1095         const struct mlx5_flow_action_copy_mreg *conf = action->conf;
1096         rte_be32_t mask = RTE_BE32(UINT32_MAX);
1097         struct rte_flow_item item = {
1098                 .spec = NULL,
1099                 .mask = &mask,
1100         };
1101         struct field_modify_info reg_src[] = {
1102                 {4, 0, reg_to_field[conf->src]},
1103                 {0, 0, 0},
1104         };
1105         struct field_modify_info reg_dst = {
1106                 .offset = 0,
1107                 .id = reg_to_field[conf->dst],
1108         };
1109         /* Adjust reg_c[0] usage according to reported mask. */
1110         if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1111                 struct mlx5_priv *priv = dev->data->dev_private;
1112                 uint32_t reg_c0 = priv->sh->dv_regc0_mask;
1113
1114                 MLX5_ASSERT(reg_c0);
1115                 MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1116                 if (conf->dst == REG_C_0) {
1117                         /* Copy to reg_c[0], within mask only. */
1118                         reg_dst.offset = rte_bsf32(reg_c0);
1119                         /*
1120                          * Mask is ignoring the enianness, because
1121                          * there is no conversion in datapath.
1122                          */
1123 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1124                         /* Copy from destination lower bits to reg_c[0]. */
1125                         mask = reg_c0 >> reg_dst.offset;
1126 #else
1127                         /* Copy from destination upper bits to reg_c[0]. */
1128                         mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1129                                           rte_fls_u32(reg_c0));
1130 #endif
1131                 } else {
1132                         mask = rte_cpu_to_be_32(reg_c0);
1133 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1134                         /* Copy from reg_c[0] to destination lower bits. */
1135                         reg_dst.offset = 0;
1136 #else
1137                         /* Copy from reg_c[0] to destination upper bits. */
1138                         reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1139                                          (rte_fls_u32(reg_c0) -
1140                                           rte_bsf32(reg_c0));
1141 #endif
1142                 }
1143         }
1144         return flow_dv_convert_modify_action(&item,
1145                                              reg_src, &reg_dst, res,
1146                                              MLX5_MODIFICATION_TYPE_COPY,
1147                                              error);
1148 }
1149
1150 /**
1151  * Convert MARK action to DV specification. This routine is used
1152  * in extensive metadata only and requires metadata register to be
1153  * handled. In legacy mode hardware tag resource is engaged.
1154  *
1155  * @param[in] dev
1156  *   Pointer to the rte_eth_dev structure.
1157  * @param[in] conf
1158  *   Pointer to MARK action specification.
1159  * @param[in,out] resource
1160  *   Pointer to the modify-header resource.
1161  * @param[out] error
1162  *   Pointer to the error structure.
1163  *
1164  * @return
1165  *   0 on success, a negative errno value otherwise and rte_errno is set.
1166  */
1167 static int
1168 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1169                             const struct rte_flow_action_mark *conf,
1170                             struct mlx5_flow_dv_modify_hdr_resource *resource,
1171                             struct rte_flow_error *error)
1172 {
1173         struct mlx5_priv *priv = dev->data->dev_private;
1174         rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1175                                            priv->sh->dv_mark_mask);
1176         rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1177         struct rte_flow_item item = {
1178                 .spec = &data,
1179                 .mask = &mask,
1180         };
1181         struct field_modify_info reg_c_x[] = {
1182                 [1] = {0, 0, 0},
1183         };
1184         int reg;
1185
1186         if (!mask)
1187                 return rte_flow_error_set(error, EINVAL,
1188                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1189                                           NULL, "zero mark action mask");
1190         reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1191         if (reg < 0)
1192                 return reg;
1193         MLX5_ASSERT(reg > 0);
1194         if (reg == REG_C_0) {
1195                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1196                 uint32_t shl_c0 = rte_bsf32(msk_c0);
1197
1198                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1199                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1200                 mask = rte_cpu_to_be_32(mask << shl_c0);
1201         }
1202         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1203         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1204                                              MLX5_MODIFICATION_TYPE_SET, error);
1205 }
1206
1207 /**
1208  * Get metadata register index for specified steering domain.
1209  *
1210  * @param[in] dev
1211  *   Pointer to the rte_eth_dev structure.
1212  * @param[in] attr
1213  *   Attributes of flow to determine steering domain.
1214  * @param[out] error
1215  *   Pointer to the error structure.
1216  *
1217  * @return
1218  *   positive index on success, a negative errno value otherwise
1219  *   and rte_errno is set.
1220  */
1221 static enum modify_reg
1222 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1223                          const struct rte_flow_attr *attr,
1224                          struct rte_flow_error *error)
1225 {
1226         int reg =
1227                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1228                                           MLX5_METADATA_FDB :
1229                                             attr->egress ?
1230                                             MLX5_METADATA_TX :
1231                                             MLX5_METADATA_RX, 0, error);
1232         if (reg < 0)
1233                 return rte_flow_error_set(error,
1234                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1235                                           NULL, "unavailable "
1236                                           "metadata register");
1237         return reg;
1238 }
1239
1240 /**
1241  * Convert SET_META action to DV specification.
1242  *
1243  * @param[in] dev
1244  *   Pointer to the rte_eth_dev structure.
1245  * @param[in,out] resource
1246  *   Pointer to the modify-header resource.
1247  * @param[in] attr
1248  *   Attributes of flow that includes this item.
1249  * @param[in] conf
1250  *   Pointer to action specification.
1251  * @param[out] error
1252  *   Pointer to the error structure.
1253  *
1254  * @return
1255  *   0 on success, a negative errno value otherwise and rte_errno is set.
1256  */
1257 static int
1258 flow_dv_convert_action_set_meta
1259                         (struct rte_eth_dev *dev,
1260                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1261                          const struct rte_flow_attr *attr,
1262                          const struct rte_flow_action_set_meta *conf,
1263                          struct rte_flow_error *error)
1264 {
1265         uint32_t mask = rte_cpu_to_be_32(conf->mask);
1266         uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
1267         struct rte_flow_item item = {
1268                 .spec = &data,
1269                 .mask = &mask,
1270         };
1271         struct field_modify_info reg_c_x[] = {
1272                 [1] = {0, 0, 0},
1273         };
1274         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1275
1276         if (reg < 0)
1277                 return reg;
1278         MLX5_ASSERT(reg != REG_NON);
1279         if (reg == REG_C_0) {
1280                 struct mlx5_priv *priv = dev->data->dev_private;
1281                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1282                 uint32_t shl_c0 = rte_bsf32(msk_c0);
1283
1284                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1285                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1286                 mask = rte_cpu_to_be_32(mask << shl_c0);
1287         }
1288         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1289         /* The routine expects parameters in memory as big-endian ones. */
1290         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1291                                              MLX5_MODIFICATION_TYPE_SET, error);
1292 }
1293
1294 /**
1295  * Convert modify-header set IPv4 DSCP action to DV specification.
1296  *
1297  * @param[in,out] resource
1298  *   Pointer to the modify-header resource.
1299  * @param[in] action
1300  *   Pointer to action specification.
1301  * @param[out] error
1302  *   Pointer to the error structure.
1303  *
1304  * @return
1305  *   0 on success, a negative errno value otherwise and rte_errno is set.
1306  */
1307 static int
1308 flow_dv_convert_action_modify_ipv4_dscp
1309                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1310                          const struct rte_flow_action *action,
1311                          struct rte_flow_error *error)
1312 {
1313         const struct rte_flow_action_set_dscp *conf =
1314                 (const struct rte_flow_action_set_dscp *)(action->conf);
1315         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1316         struct rte_flow_item_ipv4 ipv4;
1317         struct rte_flow_item_ipv4 ipv4_mask;
1318
1319         memset(&ipv4, 0, sizeof(ipv4));
1320         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1321         ipv4.hdr.type_of_service = conf->dscp;
1322         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1323         item.spec = &ipv4;
1324         item.mask = &ipv4_mask;
1325         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1326                                              MLX5_MODIFICATION_TYPE_SET, error);
1327 }
1328
1329 /**
1330  * Convert modify-header set IPv6 DSCP action to DV specification.
1331  *
1332  * @param[in,out] resource
1333  *   Pointer to the modify-header resource.
1334  * @param[in] action
1335  *   Pointer to action specification.
1336  * @param[out] error
1337  *   Pointer to the error structure.
1338  *
1339  * @return
1340  *   0 on success, a negative errno value otherwise and rte_errno is set.
1341  */
1342 static int
1343 flow_dv_convert_action_modify_ipv6_dscp
1344                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1345                          const struct rte_flow_action *action,
1346                          struct rte_flow_error *error)
1347 {
1348         const struct rte_flow_action_set_dscp *conf =
1349                 (const struct rte_flow_action_set_dscp *)(action->conf);
1350         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1351         struct rte_flow_item_ipv6 ipv6;
1352         struct rte_flow_item_ipv6 ipv6_mask;
1353
1354         memset(&ipv6, 0, sizeof(ipv6));
1355         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1356         /*
1357          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1358          * rdma-core only accept the DSCP bits byte aligned start from
1359          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1360          * bits in IPv6 case as rdma-core requires byte aligned value.
1361          */
1362         ipv6.hdr.vtc_flow = conf->dscp;
1363         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1364         item.spec = &ipv6;
1365         item.mask = &ipv6_mask;
1366         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1367                                              MLX5_MODIFICATION_TYPE_SET, error);
1368 }
1369
1370 static int
1371 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1372                            enum rte_flow_field_id field)
1373 {
1374         switch (field) {
1375         case RTE_FLOW_FIELD_START:
1376                 return 32;
1377         case RTE_FLOW_FIELD_MAC_DST:
1378         case RTE_FLOW_FIELD_MAC_SRC:
1379                 return 48;
1380         case RTE_FLOW_FIELD_VLAN_TYPE:
1381                 return 16;
1382         case RTE_FLOW_FIELD_VLAN_ID:
1383                 return 12;
1384         case RTE_FLOW_FIELD_MAC_TYPE:
1385                 return 16;
1386         case RTE_FLOW_FIELD_IPV4_DSCP:
1387                 return 6;
1388         case RTE_FLOW_FIELD_IPV4_TTL:
1389                 return 8;
1390         case RTE_FLOW_FIELD_IPV4_SRC:
1391         case RTE_FLOW_FIELD_IPV4_DST:
1392                 return 32;
1393         case RTE_FLOW_FIELD_IPV6_DSCP:
1394                 return 6;
1395         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1396                 return 8;
1397         case RTE_FLOW_FIELD_IPV6_SRC:
1398         case RTE_FLOW_FIELD_IPV6_DST:
1399                 return 128;
1400         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1401         case RTE_FLOW_FIELD_TCP_PORT_DST:
1402                 return 16;
1403         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1404         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1405                 return 32;
1406         case RTE_FLOW_FIELD_TCP_FLAGS:
1407                 return 9;
1408         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1409         case RTE_FLOW_FIELD_UDP_PORT_DST:
1410                 return 16;
1411         case RTE_FLOW_FIELD_VXLAN_VNI:
1412         case RTE_FLOW_FIELD_GENEVE_VNI:
1413                 return 24;
1414         case RTE_FLOW_FIELD_GTP_TEID:
1415         case RTE_FLOW_FIELD_TAG:
1416                 return 32;
1417         case RTE_FLOW_FIELD_MARK:
1418                 return 24;
1419         case RTE_FLOW_FIELD_META:
1420                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1421                         return 16;
1422                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1423                         return 32;
1424                 else
1425                         return 0;
1426         case RTE_FLOW_FIELD_POINTER:
1427         case RTE_FLOW_FIELD_VALUE:
1428                 return 64;
1429         default:
1430                 MLX5_ASSERT(false);
1431         }
1432         return 0;
1433 }
1434
1435 static void
1436 mlx5_flow_field_id_to_modify_info
1437                 (const struct rte_flow_action_modify_data *data,
1438                  struct field_modify_info *info,
1439                  uint32_t *mask, uint32_t *value,
1440                  uint32_t width, uint32_t dst_width,
1441                  struct rte_eth_dev *dev,
1442                  const struct rte_flow_attr *attr,
1443                  struct rte_flow_error *error)
1444 {
1445         struct mlx5_priv *priv = dev->data->dev_private;
1446         struct mlx5_dev_config *config = &priv->config;
1447         uint32_t idx = 0;
1448         uint32_t off = 0;
1449         uint64_t val = 0;
1450         switch (data->field) {
1451         case RTE_FLOW_FIELD_START:
1452                 /* not supported yet */
1453                 MLX5_ASSERT(false);
1454                 break;
1455         case RTE_FLOW_FIELD_MAC_DST:
1456                 off = data->offset > 16 ? data->offset - 16 : 0;
1457                 if (mask) {
1458                         if (data->offset < 16) {
1459                                 info[idx] = (struct field_modify_info){2, 0,
1460                                                 MLX5_MODI_OUT_DMAC_15_0};
1461                                 if (width < 16) {
1462                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1463                                                                  (16 - width));
1464                                         width = 0;
1465                                 } else {
1466                                         mask[idx] = RTE_BE16(0xffff);
1467                                         width -= 16;
1468                                 }
1469                                 if (!width)
1470                                         break;
1471                                 ++idx;
1472                         }
1473                         info[idx] = (struct field_modify_info){4, 4 * idx,
1474                                                 MLX5_MODI_OUT_DMAC_47_16};
1475                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1476                                                       (32 - width)) << off);
1477                 } else {
1478                         if (data->offset < 16)
1479                                 info[idx++] = (struct field_modify_info){2, 0,
1480                                                 MLX5_MODI_OUT_DMAC_15_0};
1481                         info[idx] = (struct field_modify_info){4, off,
1482                                                 MLX5_MODI_OUT_DMAC_47_16};
1483                 }
1484                 break;
1485         case RTE_FLOW_FIELD_MAC_SRC:
1486                 off = data->offset > 16 ? data->offset - 16 : 0;
1487                 if (mask) {
1488                         if (data->offset < 16) {
1489                                 info[idx] = (struct field_modify_info){2, 0,
1490                                                 MLX5_MODI_OUT_SMAC_15_0};
1491                                 if (width < 16) {
1492                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1493                                                                  (16 - width));
1494                                         width = 0;
1495                                 } else {
1496                                         mask[idx] = RTE_BE16(0xffff);
1497                                         width -= 16;
1498                                 }
1499                                 if (!width)
1500                                         break;
1501                                 ++idx;
1502                         }
1503                         info[idx] = (struct field_modify_info){4, 4 * idx,
1504                                                 MLX5_MODI_OUT_SMAC_47_16};
1505                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1506                                                       (32 - width)) << off);
1507                 } else {
1508                         if (data->offset < 16)
1509                                 info[idx++] = (struct field_modify_info){2, 0,
1510                                                 MLX5_MODI_OUT_SMAC_15_0};
1511                         info[idx] = (struct field_modify_info){4, off,
1512                                                 MLX5_MODI_OUT_SMAC_47_16};
1513                 }
1514                 break;
1515         case RTE_FLOW_FIELD_VLAN_TYPE:
1516                 /* not supported yet */
1517                 break;
1518         case RTE_FLOW_FIELD_VLAN_ID:
1519                 info[idx] = (struct field_modify_info){2, 0,
1520                                         MLX5_MODI_OUT_FIRST_VID};
1521                 if (mask)
1522                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1523                 break;
1524         case RTE_FLOW_FIELD_MAC_TYPE:
1525                 info[idx] = (struct field_modify_info){2, 0,
1526                                         MLX5_MODI_OUT_ETHERTYPE};
1527                 if (mask)
1528                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1529                 break;
1530         case RTE_FLOW_FIELD_IPV4_DSCP:
1531                 info[idx] = (struct field_modify_info){1, 0,
1532                                         MLX5_MODI_OUT_IP_DSCP};
1533                 if (mask)
1534                         mask[idx] = 0x3f >> (6 - width);
1535                 break;
1536         case RTE_FLOW_FIELD_IPV4_TTL:
1537                 info[idx] = (struct field_modify_info){1, 0,
1538                                         MLX5_MODI_OUT_IPV4_TTL};
1539                 if (mask)
1540                         mask[idx] = 0xff >> (8 - width);
1541                 break;
1542         case RTE_FLOW_FIELD_IPV4_SRC:
1543                 info[idx] = (struct field_modify_info){4, 0,
1544                                         MLX5_MODI_OUT_SIPV4};
1545                 if (mask)
1546                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1547                                                      (32 - width));
1548                 break;
1549         case RTE_FLOW_FIELD_IPV4_DST:
1550                 info[idx] = (struct field_modify_info){4, 0,
1551                                         MLX5_MODI_OUT_DIPV4};
1552                 if (mask)
1553                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1554                                                      (32 - width));
1555                 break;
1556         case RTE_FLOW_FIELD_IPV6_DSCP:
1557                 info[idx] = (struct field_modify_info){1, 0,
1558                                         MLX5_MODI_OUT_IP_DSCP};
1559                 if (mask)
1560                         mask[idx] = 0x3f >> (6 - width);
1561                 break;
1562         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1563                 info[idx] = (struct field_modify_info){1, 0,
1564                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1565                 if (mask)
1566                         mask[idx] = 0xff >> (8 - width);
1567                 break;
1568         case RTE_FLOW_FIELD_IPV6_SRC:
1569                 if (mask) {
1570                         if (data->offset < 32) {
1571                                 info[idx] = (struct field_modify_info){4,
1572                                                 4 * idx,
1573                                                 MLX5_MODI_OUT_SIPV6_31_0};
1574                                 if (width < 32) {
1575                                         mask[idx] =
1576                                                 rte_cpu_to_be_32(0xffffffff >>
1577                                                                  (32 - width));
1578                                         width = 0;
1579                                 } else {
1580                                         mask[idx] = RTE_BE32(0xffffffff);
1581                                         width -= 32;
1582                                 }
1583                                 if (!width)
1584                                         break;
1585                                 ++idx;
1586                         }
1587                         if (data->offset < 64) {
1588                                 info[idx] = (struct field_modify_info){4,
1589                                                 4 * idx,
1590                                                 MLX5_MODI_OUT_SIPV6_63_32};
1591                                 if (width < 32) {
1592                                         mask[idx] =
1593                                                 rte_cpu_to_be_32(0xffffffff >>
1594                                                                  (32 - width));
1595                                         width = 0;
1596                                 } else {
1597                                         mask[idx] = RTE_BE32(0xffffffff);
1598                                         width -= 32;
1599                                 }
1600                                 if (!width)
1601                                         break;
1602                                 ++idx;
1603                         }
1604                         if (data->offset < 96) {
1605                                 info[idx] = (struct field_modify_info){4,
1606                                                 4 * idx,
1607                                                 MLX5_MODI_OUT_SIPV6_95_64};
1608                                 if (width < 32) {
1609                                         mask[idx] =
1610                                                 rte_cpu_to_be_32(0xffffffff >>
1611                                                                  (32 - width));
1612                                         width = 0;
1613                                 } else {
1614                                         mask[idx] = RTE_BE32(0xffffffff);
1615                                         width -= 32;
1616                                 }
1617                                 if (!width)
1618                                         break;
1619                                 ++idx;
1620                         }
1621                         info[idx] = (struct field_modify_info){4, 4 * idx,
1622                                                 MLX5_MODI_OUT_SIPV6_127_96};
1623                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1624                                                      (32 - width));
1625                 } else {
1626                         if (data->offset < 32)
1627                                 info[idx++] = (struct field_modify_info){4, 0,
1628                                                 MLX5_MODI_OUT_SIPV6_31_0};
1629                         if (data->offset < 64)
1630                                 info[idx++] = (struct field_modify_info){4, 0,
1631                                                 MLX5_MODI_OUT_SIPV6_63_32};
1632                         if (data->offset < 96)
1633                                 info[idx++] = (struct field_modify_info){4, 0,
1634                                                 MLX5_MODI_OUT_SIPV6_95_64};
1635                         if (data->offset < 128)
1636                                 info[idx++] = (struct field_modify_info){4, 0,
1637                                                 MLX5_MODI_OUT_SIPV6_127_96};
1638                 }
1639                 break;
1640         case RTE_FLOW_FIELD_IPV6_DST:
1641                 if (mask) {
1642                         if (data->offset < 32) {
1643                                 info[idx] = (struct field_modify_info){4,
1644                                                 4 * idx,
1645                                                 MLX5_MODI_OUT_DIPV6_31_0};
1646                                 if (width < 32) {
1647                                         mask[idx] =
1648                                                 rte_cpu_to_be_32(0xffffffff >>
1649                                                                  (32 - width));
1650                                         width = 0;
1651                                 } else {
1652                                         mask[idx] = RTE_BE32(0xffffffff);
1653                                         width -= 32;
1654                                 }
1655                                 if (!width)
1656                                         break;
1657                                 ++idx;
1658                         }
1659                         if (data->offset < 64) {
1660                                 info[idx] = (struct field_modify_info){4,
1661                                                 4 * idx,
1662                                                 MLX5_MODI_OUT_DIPV6_63_32};
1663                                 if (width < 32) {
1664                                         mask[idx] =
1665                                                 rte_cpu_to_be_32(0xffffffff >>
1666                                                                  (32 - width));
1667                                         width = 0;
1668                                 } else {
1669                                         mask[idx] = RTE_BE32(0xffffffff);
1670                                         width -= 32;
1671                                 }
1672                                 if (!width)
1673                                         break;
1674                                 ++idx;
1675                         }
1676                         if (data->offset < 96) {
1677                                 info[idx] = (struct field_modify_info){4,
1678                                                 4 * idx,
1679                                                 MLX5_MODI_OUT_DIPV6_95_64};
1680                                 if (width < 32) {
1681                                         mask[idx] =
1682                                                 rte_cpu_to_be_32(0xffffffff >>
1683                                                                  (32 - width));
1684                                         width = 0;
1685                                 } else {
1686                                         mask[idx] = RTE_BE32(0xffffffff);
1687                                         width -= 32;
1688                                 }
1689                                 if (!width)
1690                                         break;
1691                                 ++idx;
1692                         }
1693                         info[idx] = (struct field_modify_info){4, 4 * idx,
1694                                                 MLX5_MODI_OUT_DIPV6_127_96};
1695                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1696                                                      (32 - width));
1697                 } else {
1698                         if (data->offset < 32)
1699                                 info[idx++] = (struct field_modify_info){4, 0,
1700                                                 MLX5_MODI_OUT_DIPV6_31_0};
1701                         if (data->offset < 64)
1702                                 info[idx++] = (struct field_modify_info){4, 0,
1703                                                 MLX5_MODI_OUT_DIPV6_63_32};
1704                         if (data->offset < 96)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_95_64};
1707                         if (data->offset < 128)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_127_96};
1710                 }
1711                 break;
1712         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1713                 info[idx] = (struct field_modify_info){2, 0,
1714                                         MLX5_MODI_OUT_TCP_SPORT};
1715                 if (mask)
1716                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1717                 break;
1718         case RTE_FLOW_FIELD_TCP_PORT_DST:
1719                 info[idx] = (struct field_modify_info){2, 0,
1720                                         MLX5_MODI_OUT_TCP_DPORT};
1721                 if (mask)
1722                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1723                 break;
1724         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1725                 info[idx] = (struct field_modify_info){4, 0,
1726                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1727                 if (mask)
1728                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1729                                                      (32 - width));
1730                 break;
1731         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1732                 info[idx] = (struct field_modify_info){4, 0,
1733                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1734                 if (mask)
1735                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1736                                                      (32 - width));
1737                 break;
1738         case RTE_FLOW_FIELD_TCP_FLAGS:
1739                 info[idx] = (struct field_modify_info){2, 0,
1740                                         MLX5_MODI_OUT_TCP_FLAGS};
1741                 if (mask)
1742                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1743                 break;
1744         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1745                 info[idx] = (struct field_modify_info){2, 0,
1746                                         MLX5_MODI_OUT_UDP_SPORT};
1747                 if (mask)
1748                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1749                 break;
1750         case RTE_FLOW_FIELD_UDP_PORT_DST:
1751                 info[idx] = (struct field_modify_info){2, 0,
1752                                         MLX5_MODI_OUT_UDP_DPORT};
1753                 if (mask)
1754                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1755                 break;
1756         case RTE_FLOW_FIELD_VXLAN_VNI:
1757                 /* not supported yet */
1758                 break;
1759         case RTE_FLOW_FIELD_GENEVE_VNI:
1760                 /* not supported yet*/
1761                 break;
1762         case RTE_FLOW_FIELD_GTP_TEID:
1763                 info[idx] = (struct field_modify_info){4, 0,
1764                                         MLX5_MODI_GTP_TEID};
1765                 if (mask)
1766                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1767                                                      (32 - width));
1768                 break;
1769         case RTE_FLOW_FIELD_TAG:
1770                 {
1771                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1772                                                    data->level, error);
1773                         if (reg < 0)
1774                                 return;
1775                         MLX5_ASSERT(reg != REG_NON);
1776                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1777                         info[idx] = (struct field_modify_info){4, 0,
1778                                                 reg_to_field[reg]};
1779                         if (mask)
1780                                 mask[idx] =
1781                                         rte_cpu_to_be_32(0xffffffff >>
1782                                                          (32 - width));
1783                 }
1784                 break;
1785         case RTE_FLOW_FIELD_MARK:
1786                 {
1787                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1788                                                        0, error);
1789                         if (reg < 0)
1790                                 return;
1791                         MLX5_ASSERT(reg != REG_NON);
1792                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1793                         info[idx] = (struct field_modify_info){4, 0,
1794                                                 reg_to_field[reg]};
1795                         if (mask)
1796                                 mask[idx] =
1797                                         rte_cpu_to_be_32(0xffffffff >>
1798                                                          (32 - width));
1799                 }
1800                 break;
1801         case RTE_FLOW_FIELD_META:
1802                 {
1803                         unsigned int xmeta = config->dv_xmeta_en;
1804                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1805                         if (reg < 0)
1806                                 return;
1807                         MLX5_ASSERT(reg != REG_NON);
1808                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1809                         if (xmeta == MLX5_XMETA_MODE_META16) {
1810                                 info[idx] = (struct field_modify_info){2, 0,
1811                                                         reg_to_field[reg]};
1812                                 if (mask)
1813                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1814                                                                 (16 - width));
1815                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1816                                 info[idx] = (struct field_modify_info){4, 0,
1817                                                         reg_to_field[reg]};
1818                                 if (mask)
1819                                         mask[idx] =
1820                                                 rte_cpu_to_be_32(0xffffffff >>
1821                                                                 (32 - width));
1822                         } else {
1823                                 MLX5_ASSERT(false);
1824                         }
1825                 }
1826                 break;
1827         case RTE_FLOW_FIELD_POINTER:
1828         case RTE_FLOW_FIELD_VALUE:
1829                 if (data->field == RTE_FLOW_FIELD_POINTER)
1830                         memcpy(&val, (void *)(uintptr_t)data->value,
1831                                sizeof(uint64_t));
1832                 else
1833                         val = data->value;
1834                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1835                         if (mask[idx]) {
1836                                 if (dst_width == 48) {
1837                                         /*special case for MAC addresses */
1838                                         value[idx] = rte_cpu_to_be_16(val);
1839                                         val >>= 16;
1840                                         dst_width -= 16;
1841                                 } else if (dst_width > 16) {
1842                                         value[idx] = rte_cpu_to_be_32(val);
1843                                         val >>= 32;
1844                                 } else if (dst_width > 8) {
1845                                         value[idx] = rte_cpu_to_be_16(val);
1846                                         val >>= 16;
1847                                 } else {
1848                                         value[idx] = (uint8_t)val;
1849                                         val >>= 8;
1850                                 }
1851                                 if (!val)
1852                                         break;
1853                         }
1854                 }
1855                 break;
1856         default:
1857                 MLX5_ASSERT(false);
1858                 break;
1859         }
1860 }
1861
1862 /**
1863  * Convert modify_field action to DV specification.
1864  *
1865  * @param[in] dev
1866  *   Pointer to the rte_eth_dev structure.
1867  * @param[in,out] resource
1868  *   Pointer to the modify-header resource.
1869  * @param[in] action
1870  *   Pointer to action specification.
1871  * @param[in] attr
1872  *   Attributes of flow that includes this item.
1873  * @param[out] error
1874  *   Pointer to the error structure.
1875  *
1876  * @return
1877  *   0 on success, a negative errno value otherwise and rte_errno is set.
1878  */
1879 static int
1880 flow_dv_convert_action_modify_field
1881                         (struct rte_eth_dev *dev,
1882                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1883                          const struct rte_flow_action *action,
1884                          const struct rte_flow_attr *attr,
1885                          struct rte_flow_error *error)
1886 {
1887         struct mlx5_priv *priv = dev->data->dev_private;
1888         struct mlx5_dev_config *config = &priv->config;
1889         const struct rte_flow_action_modify_field *conf =
1890                 (const struct rte_flow_action_modify_field *)(action->conf);
1891         struct rte_flow_item item;
1892         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1893                                                                 {0, 0, 0} };
1894         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1895                                                                 {0, 0, 0} };
1896         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1897         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1898         uint32_t type;
1899         uint32_t dst_width = mlx5_flow_item_field_width(config,
1900                                                         conf->dst.field);
1901
1902         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1903                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1904                 type = MLX5_MODIFICATION_TYPE_SET;
1905                 /** For SET fill the destination field (field) first. */
1906                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1907                         value, conf->width, dst_width, dev, attr, error);
1908                 /** Then copy immediate value from source as per mask. */
1909                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1910                         value, conf->width, dst_width, dev, attr, error);
1911                 item.spec = &value;
1912         } else {
1913                 type = MLX5_MODIFICATION_TYPE_COPY;
1914                 /** For COPY fill the destination field (dcopy) without mask. */
1915                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1916                         value, conf->width, dst_width, dev, attr, error);
1917                 /** Then construct the source field (field) with mask. */
1918                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1919                         value, conf->width, dst_width, dev, attr, error);
1920         }
1921         item.mask = &mask;
1922         return flow_dv_convert_modify_action(&item,
1923                         field, dcopy, resource, type, error);
1924 }
1925
1926 /**
1927  * Validate MARK item.
1928  *
1929  * @param[in] dev
1930  *   Pointer to the rte_eth_dev structure.
1931  * @param[in] item
1932  *   Item specification.
1933  * @param[in] attr
1934  *   Attributes of flow that includes this item.
1935  * @param[out] error
1936  *   Pointer to error structure.
1937  *
1938  * @return
1939  *   0 on success, a negative errno value otherwise and rte_errno is set.
1940  */
1941 static int
1942 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1943                            const struct rte_flow_item *item,
1944                            const struct rte_flow_attr *attr __rte_unused,
1945                            struct rte_flow_error *error)
1946 {
1947         struct mlx5_priv *priv = dev->data->dev_private;
1948         struct mlx5_dev_config *config = &priv->config;
1949         const struct rte_flow_item_mark *spec = item->spec;
1950         const struct rte_flow_item_mark *mask = item->mask;
1951         const struct rte_flow_item_mark nic_mask = {
1952                 .id = priv->sh->dv_mark_mask,
1953         };
1954         int ret;
1955
1956         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1957                 return rte_flow_error_set(error, ENOTSUP,
1958                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1959                                           "extended metadata feature"
1960                                           " isn't enabled");
1961         if (!mlx5_flow_ext_mreg_supported(dev))
1962                 return rte_flow_error_set(error, ENOTSUP,
1963                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1964                                           "extended metadata register"
1965                                           " isn't supported");
1966         if (!nic_mask.id)
1967                 return rte_flow_error_set(error, ENOTSUP,
1968                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1969                                           "extended metadata register"
1970                                           " isn't available");
1971         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1972         if (ret < 0)
1973                 return ret;
1974         if (!spec)
1975                 return rte_flow_error_set(error, EINVAL,
1976                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1977                                           item->spec,
1978                                           "data cannot be empty");
1979         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1980                 return rte_flow_error_set(error, EINVAL,
1981                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1982                                           &spec->id,
1983                                           "mark id exceeds the limit");
1984         if (!mask)
1985                 mask = &nic_mask;
1986         if (!mask->id)
1987                 return rte_flow_error_set(error, EINVAL,
1988                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1989                                         "mask cannot be zero");
1990
1991         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1992                                         (const uint8_t *)&nic_mask,
1993                                         sizeof(struct rte_flow_item_mark),
1994                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1995         if (ret < 0)
1996                 return ret;
1997         return 0;
1998 }
1999
2000 /**
2001  * Validate META item.
2002  *
2003  * @param[in] dev
2004  *   Pointer to the rte_eth_dev structure.
2005  * @param[in] item
2006  *   Item specification.
2007  * @param[in] attr
2008  *   Attributes of flow that includes this item.
2009  * @param[out] error
2010  *   Pointer to error structure.
2011  *
2012  * @return
2013  *   0 on success, a negative errno value otherwise and rte_errno is set.
2014  */
2015 static int
2016 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
2017                            const struct rte_flow_item *item,
2018                            const struct rte_flow_attr *attr,
2019                            struct rte_flow_error *error)
2020 {
2021         struct mlx5_priv *priv = dev->data->dev_private;
2022         struct mlx5_dev_config *config = &priv->config;
2023         const struct rte_flow_item_meta *spec = item->spec;
2024         const struct rte_flow_item_meta *mask = item->mask;
2025         struct rte_flow_item_meta nic_mask = {
2026                 .data = UINT32_MAX
2027         };
2028         int reg;
2029         int ret;
2030
2031         if (!spec)
2032                 return rte_flow_error_set(error, EINVAL,
2033                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2034                                           item->spec,
2035                                           "data cannot be empty");
2036         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2037                 if (!mlx5_flow_ext_mreg_supported(dev))
2038                         return rte_flow_error_set(error, ENOTSUP,
2039                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2040                                           "extended metadata register"
2041                                           " isn't supported");
2042                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2043                 if (reg < 0)
2044                         return reg;
2045                 if (reg == REG_NON)
2046                         return rte_flow_error_set(error, ENOTSUP,
2047                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2048                                         "unavalable extended metadata register");
2049                 if (reg == REG_B)
2050                         return rte_flow_error_set(error, ENOTSUP,
2051                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2052                                           "match on reg_b "
2053                                           "isn't supported");
2054                 if (reg != REG_A)
2055                         nic_mask.data = priv->sh->dv_meta_mask;
2056         } else {
2057                 if (attr->transfer)
2058                         return rte_flow_error_set(error, ENOTSUP,
2059                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2060                                         "extended metadata feature "
2061                                         "should be enabled when "
2062                                         "meta item is requested "
2063                                         "with e-switch mode ");
2064                 if (attr->ingress)
2065                         return rte_flow_error_set(error, ENOTSUP,
2066                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2067                                         "match on metadata for ingress "
2068                                         "is not supported in legacy "
2069                                         "metadata mode");
2070         }
2071         if (!mask)
2072                 mask = &rte_flow_item_meta_mask;
2073         if (!mask->data)
2074                 return rte_flow_error_set(error, EINVAL,
2075                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2076                                         "mask cannot be zero");
2077
2078         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2079                                         (const uint8_t *)&nic_mask,
2080                                         sizeof(struct rte_flow_item_meta),
2081                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2082         return ret;
2083 }
2084
2085 /**
2086  * Validate TAG item.
2087  *
2088  * @param[in] dev
2089  *   Pointer to the rte_eth_dev structure.
2090  * @param[in] item
2091  *   Item specification.
2092  * @param[in] attr
2093  *   Attributes of flow that includes this item.
2094  * @param[out] error
2095  *   Pointer to error structure.
2096  *
2097  * @return
2098  *   0 on success, a negative errno value otherwise and rte_errno is set.
2099  */
2100 static int
2101 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2102                           const struct rte_flow_item *item,
2103                           const struct rte_flow_attr *attr __rte_unused,
2104                           struct rte_flow_error *error)
2105 {
2106         const struct rte_flow_item_tag *spec = item->spec;
2107         const struct rte_flow_item_tag *mask = item->mask;
2108         const struct rte_flow_item_tag nic_mask = {
2109                 .data = RTE_BE32(UINT32_MAX),
2110                 .index = 0xff,
2111         };
2112         int ret;
2113
2114         if (!mlx5_flow_ext_mreg_supported(dev))
2115                 return rte_flow_error_set(error, ENOTSUP,
2116                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2117                                           "extensive metadata register"
2118                                           " isn't supported");
2119         if (!spec)
2120                 return rte_flow_error_set(error, EINVAL,
2121                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2122                                           item->spec,
2123                                           "data cannot be empty");
2124         if (!mask)
2125                 mask = &rte_flow_item_tag_mask;
2126         if (!mask->data)
2127                 return rte_flow_error_set(error, EINVAL,
2128                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2129                                         "mask cannot be zero");
2130
2131         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2132                                         (const uint8_t *)&nic_mask,
2133                                         sizeof(struct rte_flow_item_tag),
2134                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2135         if (ret < 0)
2136                 return ret;
2137         if (mask->index != 0xff)
2138                 return rte_flow_error_set(error, EINVAL,
2139                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2140                                           "partial mask for tag index"
2141                                           " is not supported");
2142         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2143         if (ret < 0)
2144                 return ret;
2145         MLX5_ASSERT(ret != REG_NON);
2146         return 0;
2147 }
2148
2149 /**
2150  * Validate vport item.
2151  *
2152  * @param[in] dev
2153  *   Pointer to the rte_eth_dev structure.
2154  * @param[in] item
2155  *   Item specification.
2156  * @param[in] attr
2157  *   Attributes of flow that includes this item.
2158  * @param[in] item_flags
2159  *   Bit-fields that holds the items detected until now.
2160  * @param[out] error
2161  *   Pointer to error structure.
2162  *
2163  * @return
2164  *   0 on success, a negative errno value otherwise and rte_errno is set.
2165  */
2166 static int
2167 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2168                               const struct rte_flow_item *item,
2169                               const struct rte_flow_attr *attr,
2170                               uint64_t item_flags,
2171                               struct rte_flow_error *error)
2172 {
2173         const struct rte_flow_item_port_id *spec = item->spec;
2174         const struct rte_flow_item_port_id *mask = item->mask;
2175         const struct rte_flow_item_port_id switch_mask = {
2176                         .id = 0xffffffff,
2177         };
2178         struct mlx5_priv *esw_priv;
2179         struct mlx5_priv *dev_priv;
2180         int ret;
2181
2182         if (!attr->transfer)
2183                 return rte_flow_error_set(error, EINVAL,
2184                                           RTE_FLOW_ERROR_TYPE_ITEM,
2185                                           NULL,
2186                                           "match on port id is valid only"
2187                                           " when transfer flag is enabled");
2188         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2189                 return rte_flow_error_set(error, ENOTSUP,
2190                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2191                                           "multiple source ports are not"
2192                                           " supported");
2193         if (!mask)
2194                 mask = &switch_mask;
2195         if (mask->id != 0xffffffff)
2196                 return rte_flow_error_set(error, ENOTSUP,
2197                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2198                                            mask,
2199                                            "no support for partial mask on"
2200                                            " \"id\" field");
2201         ret = mlx5_flow_item_acceptable
2202                                 (item, (const uint8_t *)mask,
2203                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2204                                  sizeof(struct rte_flow_item_port_id),
2205                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2206         if (ret)
2207                 return ret;
2208         if (!spec)
2209                 return 0;
2210         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2211         if (!esw_priv)
2212                 return rte_flow_error_set(error, rte_errno,
2213                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2214                                           "failed to obtain E-Switch info for"
2215                                           " port");
2216         dev_priv = mlx5_dev_to_eswitch_info(dev);
2217         if (!dev_priv)
2218                 return rte_flow_error_set(error, rte_errno,
2219                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2220                                           NULL,
2221                                           "failed to obtain E-Switch info");
2222         if (esw_priv->domain_id != dev_priv->domain_id)
2223                 return rte_flow_error_set(error, EINVAL,
2224                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2225                                           "cannot match on a port from a"
2226                                           " different E-Switch");
2227         return 0;
2228 }
2229
2230 /**
2231  * Validate VLAN item.
2232  *
2233  * @param[in] item
2234  *   Item specification.
2235  * @param[in] item_flags
2236  *   Bit-fields that holds the items detected until now.
2237  * @param[in] dev
2238  *   Ethernet device flow is being created on.
2239  * @param[out] error
2240  *   Pointer to error structure.
2241  *
2242  * @return
2243  *   0 on success, a negative errno value otherwise and rte_errno is set.
2244  */
2245 static int
2246 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2247                            uint64_t item_flags,
2248                            struct rte_eth_dev *dev,
2249                            struct rte_flow_error *error)
2250 {
2251         const struct rte_flow_item_vlan *mask = item->mask;
2252         const struct rte_flow_item_vlan nic_mask = {
2253                 .tci = RTE_BE16(UINT16_MAX),
2254                 .inner_type = RTE_BE16(UINT16_MAX),
2255                 .has_more_vlan = 1,
2256         };
2257         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2258         int ret;
2259         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2260                                         MLX5_FLOW_LAYER_INNER_L4) :
2261                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2262                                         MLX5_FLOW_LAYER_OUTER_L4);
2263         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2264                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2265
2266         if (item_flags & vlanm)
2267                 return rte_flow_error_set(error, EINVAL,
2268                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2269                                           "multiple VLAN layers not supported");
2270         else if ((item_flags & l34m) != 0)
2271                 return rte_flow_error_set(error, EINVAL,
2272                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2273                                           "VLAN cannot follow L3/L4 layer");
2274         if (!mask)
2275                 mask = &rte_flow_item_vlan_mask;
2276         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2277                                         (const uint8_t *)&nic_mask,
2278                                         sizeof(struct rte_flow_item_vlan),
2279                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2280         if (ret)
2281                 return ret;
2282         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2283                 struct mlx5_priv *priv = dev->data->dev_private;
2284
2285                 if (priv->vmwa_context) {
2286                         /*
2287                          * Non-NULL context means we have a virtual machine
2288                          * and SR-IOV enabled, we have to create VLAN interface
2289                          * to make hypervisor to setup E-Switch vport
2290                          * context correctly. We avoid creating the multiple
2291                          * VLAN interfaces, so we cannot support VLAN tag mask.
2292                          */
2293                         return rte_flow_error_set(error, EINVAL,
2294                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2295                                                   item,
2296                                                   "VLAN tag mask is not"
2297                                                   " supported in virtual"
2298                                                   " environment");
2299                 }
2300         }
2301         return 0;
2302 }
2303
2304 /*
2305  * GTP flags are contained in 1 byte of the format:
2306  * -------------------------------------------
2307  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2308  * |-----------------------------------------|
2309  * | value | Version | PT | Res | E | S | PN |
2310  * -------------------------------------------
2311  *
2312  * Matching is supported only for GTP flags E, S, PN.
2313  */
2314 #define MLX5_GTP_FLAGS_MASK     0x07
2315
2316 /**
2317  * Validate GTP item.
2318  *
2319  * @param[in] dev
2320  *   Pointer to the rte_eth_dev structure.
2321  * @param[in] item
2322  *   Item specification.
2323  * @param[in] item_flags
2324  *   Bit-fields that holds the items detected until now.
2325  * @param[out] error
2326  *   Pointer to error structure.
2327  *
2328  * @return
2329  *   0 on success, a negative errno value otherwise and rte_errno is set.
2330  */
2331 static int
2332 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2333                           const struct rte_flow_item *item,
2334                           uint64_t item_flags,
2335                           struct rte_flow_error *error)
2336 {
2337         struct mlx5_priv *priv = dev->data->dev_private;
2338         const struct rte_flow_item_gtp *spec = item->spec;
2339         const struct rte_flow_item_gtp *mask = item->mask;
2340         const struct rte_flow_item_gtp nic_mask = {
2341                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2342                 .msg_type = 0xff,
2343                 .teid = RTE_BE32(0xffffffff),
2344         };
2345
2346         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2347                 return rte_flow_error_set(error, ENOTSUP,
2348                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2349                                           "GTP support is not enabled");
2350         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2351                 return rte_flow_error_set(error, ENOTSUP,
2352                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2353                                           "multiple tunnel layers not"
2354                                           " supported");
2355         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2356                 return rte_flow_error_set(error, EINVAL,
2357                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2358                                           "no outer UDP layer found");
2359         if (!mask)
2360                 mask = &rte_flow_item_gtp_mask;
2361         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2362                 return rte_flow_error_set(error, ENOTSUP,
2363                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                                           "Match is supported for GTP"
2365                                           " flags only");
2366         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2367                                          (const uint8_t *)&nic_mask,
2368                                          sizeof(struct rte_flow_item_gtp),
2369                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2370 }
2371
2372 /**
2373  * Validate GTP PSC item.
2374  *
2375  * @param[in] item
2376  *   Item specification.
2377  * @param[in] last_item
2378  *   Previous validated item in the pattern items.
2379  * @param[in] gtp_item
2380  *   Previous GTP item specification.
2381  * @param[in] attr
2382  *   Pointer to flow attributes.
2383  * @param[out] error
2384  *   Pointer to error structure.
2385  *
2386  * @return
2387  *   0 on success, a negative errno value otherwise and rte_errno is set.
2388  */
2389 static int
2390 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2391                               uint64_t last_item,
2392                               const struct rte_flow_item *gtp_item,
2393                               const struct rte_flow_attr *attr,
2394                               struct rte_flow_error *error)
2395 {
2396         const struct rte_flow_item_gtp *gtp_spec;
2397         const struct rte_flow_item_gtp *gtp_mask;
2398         const struct rte_flow_item_gtp_psc *spec;
2399         const struct rte_flow_item_gtp_psc *mask;
2400         const struct rte_flow_item_gtp_psc nic_mask = {
2401                 .pdu_type = 0xFF,
2402                 .qfi = 0xFF,
2403         };
2404
2405         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2406                 return rte_flow_error_set
2407                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2408                          "GTP PSC item must be preceded with GTP item");
2409         gtp_spec = gtp_item->spec;
2410         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2411         /* GTP spec and E flag is requested to match zero. */
2412         if (gtp_spec &&
2413                 (gtp_mask->v_pt_rsv_flags &
2414                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2415                 return rte_flow_error_set
2416                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2417                          "GTP E flag must be 1 to match GTP PSC");
2418         /* Check the flow is not created in group zero. */
2419         if (!attr->transfer && !attr->group)
2420                 return rte_flow_error_set
2421                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2422                          "GTP PSC is not supported for group 0");
2423         /* GTP spec is here and E flag is requested to match zero. */
2424         if (!item->spec)
2425                 return 0;
2426         spec = item->spec;
2427         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2428         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2429                 return rte_flow_error_set
2430                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2431                          "PDU type should be smaller than 16");
2432         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2433                                          (const uint8_t *)&nic_mask,
2434                                          sizeof(struct rte_flow_item_gtp_psc),
2435                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2436 }
2437
2438 /**
2439  * Validate IPV4 item.
2440  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2441  * add specific validation of fragment_offset field,
2442  *
2443  * @param[in] item
2444  *   Item specification.
2445  * @param[in] item_flags
2446  *   Bit-fields that holds the items detected until now.
2447  * @param[out] error
2448  *   Pointer to error structure.
2449  *
2450  * @return
2451  *   0 on success, a negative errno value otherwise and rte_errno is set.
2452  */
2453 static int
2454 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2455                            uint64_t item_flags,
2456                            uint64_t last_item,
2457                            uint16_t ether_type,
2458                            struct rte_flow_error *error)
2459 {
2460         int ret;
2461         const struct rte_flow_item_ipv4 *spec = item->spec;
2462         const struct rte_flow_item_ipv4 *last = item->last;
2463         const struct rte_flow_item_ipv4 *mask = item->mask;
2464         rte_be16_t fragment_offset_spec = 0;
2465         rte_be16_t fragment_offset_last = 0;
2466         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2467                 .hdr = {
2468                         .src_addr = RTE_BE32(0xffffffff),
2469                         .dst_addr = RTE_BE32(0xffffffff),
2470                         .type_of_service = 0xff,
2471                         .fragment_offset = RTE_BE16(0xffff),
2472                         .next_proto_id = 0xff,
2473                         .time_to_live = 0xff,
2474                 },
2475         };
2476
2477         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2478                                            ether_type, &nic_ipv4_mask,
2479                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2480         if (ret < 0)
2481                 return ret;
2482         if (spec && mask)
2483                 fragment_offset_spec = spec->hdr.fragment_offset &
2484                                        mask->hdr.fragment_offset;
2485         if (!fragment_offset_spec)
2486                 return 0;
2487         /*
2488          * spec and mask are valid, enforce using full mask to make sure the
2489          * complete value is used correctly.
2490          */
2491         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2492                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2493                 return rte_flow_error_set(error, EINVAL,
2494                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2495                                           item, "must use full mask for"
2496                                           " fragment_offset");
2497         /*
2498          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2499          * indicating this is 1st fragment of fragmented packet.
2500          * This is not yet supported in MLX5, return appropriate error message.
2501          */
2502         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2503                 return rte_flow_error_set(error, ENOTSUP,
2504                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2505                                           "match on first fragment not "
2506                                           "supported");
2507         if (fragment_offset_spec && !last)
2508                 return rte_flow_error_set(error, ENOTSUP,
2509                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2510                                           "specified value not supported");
2511         /* spec and last are valid, validate the specified range. */
2512         fragment_offset_last = last->hdr.fragment_offset &
2513                                mask->hdr.fragment_offset;
2514         /*
2515          * Match on fragment_offset spec 0x2001 and last 0x3fff
2516          * means MF is 1 and frag-offset is > 0.
2517          * This packet is fragment 2nd and onward, excluding last.
2518          * This is not yet supported in MLX5, return appropriate
2519          * error message.
2520          */
2521         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2522             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2523                 return rte_flow_error_set(error, ENOTSUP,
2524                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2525                                           last, "match on following "
2526                                           "fragments not supported");
2527         /*
2528          * Match on fragment_offset spec 0x0001 and last 0x1fff
2529          * means MF is 0 and frag-offset is > 0.
2530          * This packet is last fragment of fragmented packet.
2531          * This is not yet supported in MLX5, return appropriate
2532          * error message.
2533          */
2534         if (fragment_offset_spec == RTE_BE16(1) &&
2535             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2536                 return rte_flow_error_set(error, ENOTSUP,
2537                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2538                                           last, "match on last "
2539                                           "fragment not supported");
2540         /*
2541          * Match on fragment_offset spec 0x0001 and last 0x3fff
2542          * means MF and/or frag-offset is not 0.
2543          * This is a fragmented packet.
2544          * Other range values are invalid and rejected.
2545          */
2546         if (!(fragment_offset_spec == RTE_BE16(1) &&
2547               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2548                 return rte_flow_error_set(error, ENOTSUP,
2549                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2550                                           "specified range not supported");
2551         return 0;
2552 }
2553
2554 /**
2555  * Validate IPV6 fragment extension item.
2556  *
2557  * @param[in] item
2558  *   Item specification.
2559  * @param[in] item_flags
2560  *   Bit-fields that holds the items detected until now.
2561  * @param[out] error
2562  *   Pointer to error structure.
2563  *
2564  * @return
2565  *   0 on success, a negative errno value otherwise and rte_errno is set.
2566  */
2567 static int
2568 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2569                                     uint64_t item_flags,
2570                                     struct rte_flow_error *error)
2571 {
2572         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2573         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2574         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2575         rte_be16_t frag_data_spec = 0;
2576         rte_be16_t frag_data_last = 0;
2577         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2578         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2579                                       MLX5_FLOW_LAYER_OUTER_L4;
2580         int ret = 0;
2581         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2582                 .hdr = {
2583                         .next_header = 0xff,
2584                         .frag_data = RTE_BE16(0xffff),
2585                 },
2586         };
2587
2588         if (item_flags & l4m)
2589                 return rte_flow_error_set(error, EINVAL,
2590                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2591                                           "ipv6 fragment extension item cannot "
2592                                           "follow L4 item.");
2593         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2594             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2595                 return rte_flow_error_set(error, EINVAL,
2596                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2597                                           "ipv6 fragment extension item must "
2598                                           "follow ipv6 item");
2599         if (spec && mask)
2600                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2601         if (!frag_data_spec)
2602                 return 0;
2603         /*
2604          * spec and mask are valid, enforce using full mask to make sure the
2605          * complete value is used correctly.
2606          */
2607         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2608                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2609                 return rte_flow_error_set(error, EINVAL,
2610                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2611                                           item, "must use full mask for"
2612                                           " frag_data");
2613         /*
2614          * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
2615          * This is 1st fragment of fragmented packet.
2616          */
2617         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2618                 return rte_flow_error_set(error, ENOTSUP,
2619                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2620                                           "match on first fragment not "
2621                                           "supported");
2622         if (frag_data_spec && !last)
2623                 return rte_flow_error_set(error, EINVAL,
2624                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2625                                           "specified value not supported");
2626         ret = mlx5_flow_item_acceptable
2627                                 (item, (const uint8_t *)mask,
2628                                  (const uint8_t *)&nic_mask,
2629                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2630                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2631         if (ret)
2632                 return ret;
2633         /* spec and last are valid, validate the specified range. */
2634         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2635         /*
2636          * Match on frag_data spec 0x0009 and last 0xfff9
2637          * means M is 1 and frag-offset is > 0.
2638          * This packet is fragment 2nd and onward, excluding last.
2639          * This is not yet supported in MLX5, return appropriate
2640          * error message.
2641          */
2642         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2643                                        RTE_IPV6_EHDR_MF_MASK) &&
2644             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2645                 return rte_flow_error_set(error, ENOTSUP,
2646                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2647                                           last, "match on following "
2648                                           "fragments not supported");
2649         /*
2650          * Match on frag_data spec 0x0008 and last 0xfff8
2651          * means M is 0 and frag-offset is > 0.
2652          * This packet is last fragment of fragmented packet.
2653          * This is not yet supported in MLX5, return appropriate
2654          * error message.
2655          */
2656         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2657             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2658                 return rte_flow_error_set(error, ENOTSUP,
2659                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2660                                           last, "match on last "
2661                                           "fragment not supported");
2662         /* Other range values are invalid and rejected. */
2663         return rte_flow_error_set(error, EINVAL,
2664                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2665                                   "specified range not supported");
2666 }
2667
2668 /*
2669  * Validate ASO CT item.
2670  *
2671  * @param[in] dev
2672  *   Pointer to the rte_eth_dev structure.
2673  * @param[in] item
2674  *   Item specification.
2675  * @param[in] item_flags
2676  *   Pointer to bit-fields that holds the items detected until now.
2677  * @param[out] error
2678  *   Pointer to error structure.
2679  *
2680  * @return
2681  *   0 on success, a negative errno value otherwise and rte_errno is set.
2682  */
2683 static int
2684 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2685                              const struct rte_flow_item *item,
2686                              uint64_t *item_flags,
2687                              struct rte_flow_error *error)
2688 {
2689         const struct rte_flow_item_conntrack *spec = item->spec;
2690         const struct rte_flow_item_conntrack *mask = item->mask;
2691         RTE_SET_USED(dev);
2692         uint32_t flags;
2693
2694         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2695                 return rte_flow_error_set(error, EINVAL,
2696                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2697                                           "Only one CT is supported");
2698         if (!mask)
2699                 mask = &rte_flow_item_conntrack_mask;
2700         flags = spec->flags & mask->flags;
2701         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2702             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2703              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2704              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2705                 return rte_flow_error_set(error, EINVAL,
2706                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2707                                           "Conflict status bits");
2708         /* State change also needs to be considered. */
2709         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2710         return 0;
2711 }
2712
2713 /**
2714  * Validate the pop VLAN action.
2715  *
2716  * @param[in] dev
2717  *   Pointer to the rte_eth_dev structure.
2718  * @param[in] action_flags
2719  *   Holds the actions detected until now.
2720  * @param[in] action
2721  *   Pointer to the pop vlan action.
2722  * @param[in] item_flags
2723  *   The items found in this flow rule.
2724  * @param[in] attr
2725  *   Pointer to flow attributes.
2726  * @param[out] error
2727  *   Pointer to error structure.
2728  *
2729  * @return
2730  *   0 on success, a negative errno value otherwise and rte_errno is set.
2731  */
2732 static int
2733 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2734                                  uint64_t action_flags,
2735                                  const struct rte_flow_action *action,
2736                                  uint64_t item_flags,
2737                                  const struct rte_flow_attr *attr,
2738                                  struct rte_flow_error *error)
2739 {
2740         const struct mlx5_priv *priv = dev->data->dev_private;
2741
2742         (void)action;
2743         (void)attr;
2744         if (!priv->sh->pop_vlan_action)
2745                 return rte_flow_error_set(error, ENOTSUP,
2746                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2747                                           NULL,
2748                                           "pop vlan action is not supported");
2749         if (attr->egress)
2750                 return rte_flow_error_set(error, ENOTSUP,
2751                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2752                                           NULL,
2753                                           "pop vlan action not supported for "
2754                                           "egress");
2755         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2756                 return rte_flow_error_set(error, ENOTSUP,
2757                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2758                                           "no support for multiple VLAN "
2759                                           "actions");
2760         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2761         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2762             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2763                 return rte_flow_error_set(error, ENOTSUP,
2764                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2765                                           NULL,
2766                                           "cannot pop vlan after decap without "
2767                                           "match on inner vlan in the flow");
2768         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2769         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2770             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2771                 return rte_flow_error_set(error, ENOTSUP,
2772                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2773                                           NULL,
2774                                           "cannot pop vlan without a "
2775                                           "match on (outer) vlan in the flow");
2776         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2777                 return rte_flow_error_set(error, EINVAL,
2778                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2779                                           "wrong action order, port_id should "
2780                                           "be after pop VLAN action");
2781         if (!attr->transfer && priv->representor)
2782                 return rte_flow_error_set(error, ENOTSUP,
2783                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2784                                           "pop vlan action for VF representor "
2785                                           "not supported on NIC table");
2786         return 0;
2787 }
2788
/**
 * Get VLAN default info from the VLAN match info.
 *
 * Scans the pattern for the first VLAN item (regular or MLX5-internal)
 * and, when the item carries a spec, copies the fully-masked PCP/VID
 * bits and the inner ethertype into @p vlan. Partially-masked fields
 * are left untouched. Does nothing when no VLAN item or no spec is
 * present.
 *
 * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to the VLAN header info to fill in.
 */
static void
flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
				  struct rte_vlan_hdr *vlan)
{
	/* Default mask applied when the VLAN item has a spec but no mask. */
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
				MLX5DV_FLOW_VLAN_VID_MASK),
		.inner_type = RTE_BE16(0xffff),
	};

	if (items == NULL)
		return;
	/* Stop at the first VLAN item (either public or internal type). */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int type = items->type;

		if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
		    type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
			break;
	}
	if (items->type != RTE_FLOW_ITEM_TYPE_END) {
		const struct rte_flow_item_vlan *vlan_m = items->mask;
		const struct rte_flow_item_vlan *vlan_v = items->spec;

		/* If VLAN item in pattern doesn't contain data, return here. */
		if (!vlan_v)
			return;
		if (!vlan_m)
			vlan_m = &nic_mask;
		/* Only full match values are accepted */
		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
		     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
			vlan->vlan_tci |=
				rte_be_to_cpu_16(vlan_v->tci &
						 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
		}
		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
		     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
			vlan->vlan_tci |=
				rte_be_to_cpu_16(vlan_v->tci &
						 MLX5DV_FLOW_VLAN_VID_MASK_BE);
		}
		if (vlan_m->inner_type == nic_mask.inner_type)
			vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
							   vlan_m->inner_type);
	}
}
2848
2849 /**
2850  * Validate the push VLAN action.
2851  *
2852  * @param[in] dev
2853  *   Pointer to the rte_eth_dev structure.
2854  * @param[in] action_flags
2855  *   Holds the actions detected until now.
2856  * @param[in] item_flags
2857  *   The items found in this flow rule.
2858  * @param[in] action
2859  *   Pointer to the action structure.
2860  * @param[in] attr
2861  *   Pointer to flow attributes
2862  * @param[out] error
2863  *   Pointer to error structure.
2864  *
2865  * @return
2866  *   0 on success, a negative errno value otherwise and rte_errno is set.
2867  */
2868 static int
2869 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2870                                   uint64_t action_flags,
2871                                   const struct rte_flow_item_vlan *vlan_m,
2872                                   const struct rte_flow_action *action,
2873                                   const struct rte_flow_attr *attr,
2874                                   struct rte_flow_error *error)
2875 {
2876         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2877         const struct mlx5_priv *priv = dev->data->dev_private;
2878
2879         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2880             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2881                 return rte_flow_error_set(error, EINVAL,
2882                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2883                                           "invalid vlan ethertype");
2884         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2885                 return rte_flow_error_set(error, EINVAL,
2886                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2887                                           "wrong action order, port_id should "
2888                                           "be after push VLAN");
2889         if (!attr->transfer && priv->representor)
2890                 return rte_flow_error_set(error, ENOTSUP,
2891                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2892                                           "push vlan action for VF representor "
2893                                           "not supported on NIC table");
2894         if (vlan_m &&
2895             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2896             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2897                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2898             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2899             !(mlx5_flow_find_action
2900                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2901                 return rte_flow_error_set(error, EINVAL,
2902                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2903                                           "not full match mask on VLAN PCP and "
2904                                           "there is no of_set_vlan_pcp action, "
2905                                           "push VLAN action cannot figure out "
2906                                           "PCP value");
2907         if (vlan_m &&
2908             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2909             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2910                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2911             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2912             !(mlx5_flow_find_action
2913                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2914                 return rte_flow_error_set(error, EINVAL,
2915                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2916                                           "not full match mask on VLAN VID and "
2917                                           "there is no of_set_vlan_vid action, "
2918                                           "push VLAN action cannot figure out "
2919                                           "VID value");
2920         (void)attr;
2921         return 0;
2922 }
2923
2924 /**
2925  * Validate the set VLAN PCP.
2926  *
2927  * @param[in] action_flags
2928  *   Holds the actions detected until now.
2929  * @param[in] actions
2930  *   Pointer to the list of actions remaining in the flow rule.
2931  * @param[out] error
2932  *   Pointer to error structure.
2933  *
2934  * @return
2935  *   0 on success, a negative errno value otherwise and rte_errno is set.
2936  */
2937 static int
2938 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2939                                      const struct rte_flow_action actions[],
2940                                      struct rte_flow_error *error)
2941 {
2942         const struct rte_flow_action *action = actions;
2943         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2944
2945         if (conf->vlan_pcp > 7)
2946                 return rte_flow_error_set(error, EINVAL,
2947                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2948                                           "VLAN PCP value is too big");
2949         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2950                 return rte_flow_error_set(error, ENOTSUP,
2951                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2952                                           "set VLAN PCP action must follow "
2953                                           "the push VLAN action");
2954         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2955                 return rte_flow_error_set(error, ENOTSUP,
2956                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2957                                           "Multiple VLAN PCP modification are "
2958                                           "not supported");
2959         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2960                 return rte_flow_error_set(error, EINVAL,
2961                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2962                                           "wrong action order, port_id should "
2963                                           "be after set VLAN PCP");
2964         return 0;
2965 }
2966
2967 /**
2968  * Validate the set VLAN VID.
2969  *
2970  * @param[in] item_flags
2971  *   Holds the items detected in this rule.
2972  * @param[in] action_flags
2973  *   Holds the actions detected until now.
2974  * @param[in] actions
2975  *   Pointer to the list of actions remaining in the flow rule.
2976  * @param[out] error
2977  *   Pointer to error structure.
2978  *
2979  * @return
2980  *   0 on success, a negative errno value otherwise and rte_errno is set.
2981  */
2982 static int
2983 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2984                                      uint64_t action_flags,
2985                                      const struct rte_flow_action actions[],
2986                                      struct rte_flow_error *error)
2987 {
2988         const struct rte_flow_action *action = actions;
2989         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2990
2991         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2992                 return rte_flow_error_set(error, EINVAL,
2993                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2994                                           "VLAN VID value is too big");
2995         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2996             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2997                 return rte_flow_error_set(error, ENOTSUP,
2998                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2999                                           "set VLAN VID action must follow push"
3000                                           " VLAN action or match on VLAN item");
3001         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3002                 return rte_flow_error_set(error, ENOTSUP,
3003                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3004                                           "Multiple VLAN VID modifications are "
3005                                           "not supported");
3006         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3007                 return rte_flow_error_set(error, EINVAL,
3008                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3009                                           "wrong action order, port_id should "
3010                                           "be after set VLAN VID");
3011         return 0;
3012 }
3013
/**
 * Validate the FLAG action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_flag(struct rte_eth_dev *dev,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	int ret;

	/* Fall back if no extended metadata register support. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return mlx5_flow_validate_action_flag(action_flags, attr,
						      error);
	/* Extensive metadata mode requires registers. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no metadata registers "
					  "to support flag action");
	if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "extended metadata register"
					  " isn't available");
	/* Query the metadata register used for MARK; positive id expected. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret > 0);
	/* FLAG and MARK share the register, so they are mutually exclusive. */
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	return 0;
}
3069
3070 /**
3071  * Validate MARK action.
3072  *
3073  * @param[in] dev
3074  *   Pointer to the rte_eth_dev structure.
3075  * @param[in] action
3076  *   Pointer to action.
3077  * @param[in] action_flags
3078  *   Holds the actions detected until now.
3079  * @param[in] attr
3080  *   Pointer to flow attributes
3081  * @param[out] error
3082  *   Pointer to error structure.
3083  *
3084  * @return
3085  *   0 on success, a negative errno value otherwise and rte_errno is set.
3086  */
3087 static int
3088 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3089                              const struct rte_flow_action *action,
3090                              uint64_t action_flags,
3091                              const struct rte_flow_attr *attr,
3092                              struct rte_flow_error *error)
3093 {
3094         struct mlx5_priv *priv = dev->data->dev_private;
3095         struct mlx5_dev_config *config = &priv->config;
3096         const struct rte_flow_action_mark *mark = action->conf;
3097         int ret;
3098
3099         if (is_tunnel_offload_active(dev))
3100                 return rte_flow_error_set(error, ENOTSUP,
3101                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3102                                           "no mark action "
3103                                           "if tunnel offload active");
3104         /* Fall back if no extended metadata register support. */
3105         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3106                 return mlx5_flow_validate_action_mark(action, action_flags,
3107                                                       attr, error);
3108         /* Extensive metadata mode requires registers. */
3109         if (!mlx5_flow_ext_mreg_supported(dev))
3110                 return rte_flow_error_set(error, ENOTSUP,
3111                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3112                                           "no metadata registers "
3113                                           "to support mark action");
3114         if (!priv->sh->dv_mark_mask)
3115                 return rte_flow_error_set(error, ENOTSUP,
3116                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3117                                           "extended metadata register"
3118                                           " isn't available");
3119         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3120         if (ret < 0)
3121                 return ret;
3122         MLX5_ASSERT(ret > 0);
3123         if (!mark)
3124                 return rte_flow_error_set(error, EINVAL,
3125                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3126                                           "configuration cannot be null");
3127         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3128                 return rte_flow_error_set(error, EINVAL,
3129                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3130                                           &mark->id,
3131                                           "mark id exceeds the limit");
3132         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3133                 return rte_flow_error_set(error, EINVAL,
3134                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3135                                           "can't flag and mark in same flow");
3136         if (action_flags & MLX5_FLOW_ACTION_MARK)
3137                 return rte_flow_error_set(error, EINVAL,
3138                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3139                                           "can't have 2 mark actions in same"
3140                                           " flow");
3141         return 0;
3142 }
3143
3144 /**
3145  * Validate SET_META action.
3146  *
3147  * @param[in] dev
3148  *   Pointer to the rte_eth_dev structure.
3149  * @param[in] action
3150  *   Pointer to the action structure.
3151  * @param[in] action_flags
3152  *   Holds the actions detected until now.
3153  * @param[in] attr
3154  *   Pointer to flow attributes
3155  * @param[out] error
3156  *   Pointer to error structure.
3157  *
3158  * @return
3159  *   0 on success, a negative errno value otherwise and rte_errno is set.
3160  */
3161 static int
3162 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3163                                  const struct rte_flow_action *action,
3164                                  uint64_t action_flags __rte_unused,
3165                                  const struct rte_flow_attr *attr,
3166                                  struct rte_flow_error *error)
3167 {
3168         const struct rte_flow_action_set_meta *conf;
3169         uint32_t nic_mask = UINT32_MAX;
3170         int reg;
3171
3172         if (!mlx5_flow_ext_mreg_supported(dev))
3173                 return rte_flow_error_set(error, ENOTSUP,
3174                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3175                                           "extended metadata register"
3176                                           " isn't supported");
3177         reg = flow_dv_get_metadata_reg(dev, attr, error);
3178         if (reg < 0)
3179                 return reg;
3180         if (reg == REG_NON)
3181                 return rte_flow_error_set(error, ENOTSUP,
3182                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3183                                           "unavalable extended metadata register");
3184         if (reg != REG_A && reg != REG_B) {
3185                 struct mlx5_priv *priv = dev->data->dev_private;
3186
3187                 nic_mask = priv->sh->dv_meta_mask;
3188         }
3189         if (!(action->conf))
3190                 return rte_flow_error_set(error, EINVAL,
3191                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3192                                           "configuration cannot be null");
3193         conf = (const struct rte_flow_action_set_meta *)action->conf;
3194         if (!conf->mask)
3195                 return rte_flow_error_set(error, EINVAL,
3196                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3197                                           "zero mask doesn't have any effect");
3198         if (conf->mask & ~nic_mask)
3199                 return rte_flow_error_set(error, EINVAL,
3200                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3201                                           "meta data must be within reg C0");
3202         return 0;
3203 }
3204
3205 /**
3206  * Validate SET_TAG action.
3207  *
3208  * @param[in] dev
3209  *   Pointer to the rte_eth_dev structure.
3210  * @param[in] action
3211  *   Pointer to the action structure.
3212  * @param[in] action_flags
3213  *   Holds the actions detected until now.
3214  * @param[in] attr
3215  *   Pointer to flow attributes
3216  * @param[out] error
3217  *   Pointer to error structure.
3218  *
3219  * @return
3220  *   0 on success, a negative errno value otherwise and rte_errno is set.
3221  */
3222 static int
3223 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3224                                 const struct rte_flow_action *action,
3225                                 uint64_t action_flags,
3226                                 const struct rte_flow_attr *attr,
3227                                 struct rte_flow_error *error)
3228 {
3229         const struct rte_flow_action_set_tag *conf;
3230         const uint64_t terminal_action_flags =
3231                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3232                 MLX5_FLOW_ACTION_RSS;
3233         int ret;
3234
3235         if (!mlx5_flow_ext_mreg_supported(dev))
3236                 return rte_flow_error_set(error, ENOTSUP,
3237                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3238                                           "extensive metadata register"
3239                                           " isn't supported");
3240         if (!(action->conf))
3241                 return rte_flow_error_set(error, EINVAL,
3242                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3243                                           "configuration cannot be null");
3244         conf = (const struct rte_flow_action_set_tag *)action->conf;
3245         if (!conf->mask)
3246                 return rte_flow_error_set(error, EINVAL,
3247                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3248                                           "zero mask doesn't have any effect");
3249         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3250         if (ret < 0)
3251                 return ret;
3252         if (!attr->transfer && attr->ingress &&
3253             (action_flags & terminal_action_flags))
3254                 return rte_flow_error_set(error, EINVAL,
3255                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3256                                           "set_tag has no effect"
3257                                           " with terminal actions");
3258         return 0;
3259 }
3260
3261 /**
3262  * Check if action counter is shared by either old or new mechanism.
3263  *
3264  * @param[in] action
3265  *   Pointer to the action structure.
3266  *
3267  * @return
3268  *   True when counter is shared, false otherwise.
3269  */
3270 static inline bool
3271 is_shared_action_count(const struct rte_flow_action *action)
3272 {
3273         const struct rte_flow_action_count *count =
3274                         (const struct rte_flow_action_count *)action->conf;
3275
3276         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3277                 return true;
3278         return !!(count && count->shared);
3279 }
3280
/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] shared
 *   Indicator if action is shared.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
			      uint64_t action_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* DevX is required for flow counters. */
	if (!priv->config.devx)
		goto notsup_err;
	if (action_flags & MLX5_FLOW_ACTION_COUNT)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "duplicate count actions set");
	if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
	    !priv->sh->flow_hit_aso_en)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "old age and shared count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
	/*
	 * Without DevX counter support the success return above is compiled
	 * out and control deliberately falls through to the error below.
	 */
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
3324
3325 /**
3326  * Validate the L2 encap action.
3327  *
3328  * @param[in] dev
3329  *   Pointer to the rte_eth_dev structure.
3330  * @param[in] action_flags
3331  *   Holds the actions detected until now.
3332  * @param[in] action
3333  *   Pointer to the action structure.
3334  * @param[in] attr
3335  *   Pointer to flow attributes.
3336  * @param[out] error
3337  *   Pointer to error structure.
3338  *
3339  * @return
3340  *   0 on success, a negative errno value otherwise and rte_errno is set.
3341  */
3342 static int
3343 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3344                                  uint64_t action_flags,
3345                                  const struct rte_flow_action *action,
3346                                  const struct rte_flow_attr *attr,
3347                                  struct rte_flow_error *error)
3348 {
3349         const struct mlx5_priv *priv = dev->data->dev_private;
3350
3351         if (!(action->conf))
3352                 return rte_flow_error_set(error, EINVAL,
3353                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3354                                           "configuration cannot be null");
3355         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3356                 return rte_flow_error_set(error, EINVAL,
3357                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3358                                           "can only have a single encap action "
3359                                           "in a flow");
3360         if (!attr->transfer && priv->representor)
3361                 return rte_flow_error_set(error, ENOTSUP,
3362                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3363                                           "encap action for VF representor "
3364                                           "not supported on NIC table");
3365         return 0;
3366 }
3367
3368 /**
3369  * Validate a decap action.
3370  *
3371  * @param[in] dev
3372  *   Pointer to the rte_eth_dev structure.
3373  * @param[in] action_flags
3374  *   Holds the actions detected until now.
3375  * @param[in] action
3376  *   Pointer to the action structure.
3377  * @param[in] item_flags
3378  *   Holds the items detected.
3379  * @param[in] attr
3380  *   Pointer to flow attributes
3381  * @param[out] error
3382  *   Pointer to error structure.
3383  *
3384  * @return
3385  *   0 on success, a negative errno value otherwise and rte_errno is set.
3386  */
3387 static int
3388 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3389                               uint64_t action_flags,
3390                               const struct rte_flow_action *action,
3391                               const uint64_t item_flags,
3392                               const struct rte_flow_attr *attr,
3393                               struct rte_flow_error *error)
3394 {
3395         const struct mlx5_priv *priv = dev->data->dev_private;
3396
3397         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3398             !priv->config.decap_en)
3399                 return rte_flow_error_set(error, ENOTSUP,
3400                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3401                                           "decap is not enabled");
3402         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3403                 return rte_flow_error_set(error, ENOTSUP,
3404                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3405                                           action_flags &
3406                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3407                                           "have a single decap action" : "decap "
3408                                           "after encap is not supported");
3409         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3410                 return rte_flow_error_set(error, EINVAL,
3411                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3412                                           "can't have decap action after"
3413                                           " modify action");
3414         if (attr->egress)
3415                 return rte_flow_error_set(error, ENOTSUP,
3416                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3417                                           NULL,
3418                                           "decap action not supported for "
3419                                           "egress");
3420         if (!attr->transfer && priv->representor)
3421                 return rte_flow_error_set(error, ENOTSUP,
3422                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3423                                           "decap action for VF representor "
3424                                           "not supported on NIC table");
3425         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3426             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3427                 return rte_flow_error_set(error, ENOTSUP,
3428                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3429                                 "VXLAN item should be present for VXLAN decap");
3430         return 0;
3431 }
3432
/*
 * Shared zero-sized raw decap action (no data). Presumably used as a
 * placeholder where a decap with no explicit buffer is required -
 * verify against the usage sites elsewhere in this file.
 */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3434
3435 /**
3436  * Validate the raw encap and decap actions.
3437  *
3438  * @param[in] dev
3439  *   Pointer to the rte_eth_dev structure.
3440  * @param[in] decap
3441  *   Pointer to the decap action.
3442  * @param[in] encap
3443  *   Pointer to the encap action.
3444  * @param[in] attr
3445  *   Pointer to flow attributes
3446  * @param[in/out] action_flags
3447  *   Holds the actions detected until now.
3448  * @param[out] actions_n
3449  *   pointer to the number of actions counter.
3450  * @param[in] action
3451  *   Pointer to the action structure.
3452  * @param[in] item_flags
3453  *   Holds the items detected.
3454  * @param[out] error
3455  *   Pointer to error structure.
3456  *
3457  * @return
3458  *   0 on success, a negative errno value otherwise and rte_errno is set.
3459  */
3460 static int
3461 flow_dv_validate_action_raw_encap_decap
3462         (struct rte_eth_dev *dev,
3463          const struct rte_flow_action_raw_decap *decap,
3464          const struct rte_flow_action_raw_encap *encap,
3465          const struct rte_flow_attr *attr, uint64_t *action_flags,
3466          int *actions_n, const struct rte_flow_action *action,
3467          uint64_t item_flags, struct rte_flow_error *error)
3468 {
3469         const struct mlx5_priv *priv = dev->data->dev_private;
3470         int ret;
3471
3472         if (encap && (!encap->size || !encap->data))
3473                 return rte_flow_error_set(error, EINVAL,
3474                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3475                                           "raw encap data cannot be empty");
3476         if (decap && encap) {
3477                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3478                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3479                         /* L3 encap. */
3480                         decap = NULL;
3481                 else if (encap->size <=
3482                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3483                            decap->size >
3484                            MLX5_ENCAPSULATION_DECISION_SIZE)
3485                         /* L3 decap. */
3486                         encap = NULL;
3487                 else if (encap->size >
3488                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3489                            decap->size >
3490                            MLX5_ENCAPSULATION_DECISION_SIZE)
3491                         /* 2 L2 actions: encap and decap. */
3492                         ;
3493                 else
3494                         return rte_flow_error_set(error,
3495                                 ENOTSUP,
3496                                 RTE_FLOW_ERROR_TYPE_ACTION,
3497                                 NULL, "unsupported too small "
3498                                 "raw decap and too small raw "
3499                                 "encap combination");
3500         }
3501         if (decap) {
3502                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3503                                                     item_flags, attr, error);
3504                 if (ret < 0)
3505                         return ret;
3506                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3507                 ++(*actions_n);
3508         }
3509         if (encap) {
3510                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3511                         return rte_flow_error_set(error, ENOTSUP,
3512                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3513                                                   NULL,
3514                                                   "small raw encap size");
3515                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3516                         return rte_flow_error_set(error, EINVAL,
3517                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3518                                                   NULL,
3519                                                   "more than one encap action");
3520                 if (!attr->transfer && priv->representor)
3521                         return rte_flow_error_set
3522                                         (error, ENOTSUP,
3523                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3524                                          "encap action for VF representor "
3525                                          "not supported on NIC table");
3526                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3527                 ++(*actions_n);
3528         }
3529         return 0;
3530 }
3531
3532 /*
3533  * Validate the ASO CT action.
3534  *
3535  * @param[in] dev
3536  *   Pointer to the rte_eth_dev structure.
3537  * @param[in] action_flags
3538  *   Holds the actions detected until now.
3539  * @param[in] item_flags
3540  *   The items found in this flow rule.
3541  * @param[in] attr
3542  *   Pointer to flow attributes.
3543  * @param[out] error
3544  *   Pointer to error structure.
3545  *
3546  * @return
3547  *   0 on success, a negative errno value otherwise and rte_errno is set.
3548  */
3549 static int
3550 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3551                                uint64_t action_flags,
3552                                uint64_t item_flags,
3553                                const struct rte_flow_attr *attr,
3554                                struct rte_flow_error *error)
3555 {
3556         RTE_SET_USED(dev);
3557
3558         if (attr->group == 0 && !attr->transfer)
3559                 return rte_flow_error_set(error, ENOTSUP,
3560                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3561                                           NULL,
3562                                           "Only support non-root table");
3563         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3564                 return rte_flow_error_set(error, ENOTSUP,
3565                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3566                                           "CT cannot follow a fate action");
3567         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3568             (action_flags & MLX5_FLOW_ACTION_AGE))
3569                 return rte_flow_error_set(error, EINVAL,
3570                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3571                                           "Only one ASO action is supported");
3572         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3573                 return rte_flow_error_set(error, EINVAL,
3574                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3575                                           "Encap cannot exist before CT");
3576         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3577                 return rte_flow_error_set(error, EINVAL,
3578                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3579                                           "Not a outer TCP packet");
3580         return 0;
3581 }
3582
3583 /**
3584  * Match encap_decap resource.
3585  *
3586  * @param list
3587  *   Pointer to the hash list.
3588  * @param entry
3589  *   Pointer to exist resource entry object.
3590  * @param key
3591  *   Key of the new entry.
3592  * @param ctx_cb
3593  *   Pointer to new encap_decap resource.
3594  *
3595  * @return
3596  *   0 on matching, none-zero otherwise.
3597  */
3598 int
3599 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3600                              struct mlx5_hlist_entry *entry,
3601                              uint64_t key __rte_unused, void *cb_ctx)
3602 {
3603         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3604         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3605         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3606
3607         cache_resource = container_of(entry,
3608                                       struct mlx5_flow_dv_encap_decap_resource,
3609                                       entry);
3610         if (resource->reformat_type == cache_resource->reformat_type &&
3611             resource->ft_type == cache_resource->ft_type &&
3612             resource->flags == cache_resource->flags &&
3613             resource->size == cache_resource->size &&
3614             !memcmp((const void *)resource->buf,
3615                     (const void *)cache_resource->buf,
3616                     resource->size))
3617                 return 0;
3618         return -1;
3619 }
3620
/**
 * Allocate and register a new encap/decap resource.
 *
 * Hash list create callback, invoked when no existing entry matched the
 * lookup key.
 *
 * @param list
 *   Pointer to the hash list; its ctx is the shared device context.
 * @param key
 *   Key of the new entry (unused, the resource template carries all data).
 * @param cb_ctx
 *   Pointer to a mlx5_flow_cb_ctx holding the resource template and the
 *   error structure to fill on failure.
 *
 * @return
 *   Pointer to the new hash list entry on success, NULL otherwise with
 *   the rte_flow error set.
 */
struct mlx5_hlist_entry *
flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
			      uint64_t key __rte_unused,
			      void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5dv_dr_domain *domain;
	struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	uint32_t idx;
	int ret;

	/* Select the DR domain matching the resource's flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Register new encap/decap resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
				       &idx);
	if (!cache_resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	/* Copy the template and record the ipool index for later release. */
	*cache_resource = *resource;
	cache_resource->idx = idx;
	ret = mlx5_flow_os_create_flow_action_packet_reformat
					(sh->ctx, domain, cache_resource,
					 &cache_resource->action);
	if (ret) {
		/* Action creation failed: return the pool entry and report. */
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create action");
		return NULL;
	}

	return &cache_resource->entry;
}
3677
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * The lookup key is a checksum over {table type, reformat type, root bit},
 * extended with a checksum of the reformat buffer when one is relevant.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_hlist_entry *entry;
	union {
		struct {
			uint32_t ft_type:8;
			uint32_t refmt_type:8;
			/*
			 * Header reformat actions can be shared between
			 * non-root tables. One bit to indicate non-root
			 * table or not.
			 */
			uint32_t is_root:1;
			uint32_t reserve:15;
		};
		uint32_t v32;
	} encap_decap_key = {
		{
			.ft_type = resource->ft_type,
			.refmt_type = resource->reformat_type,
			/*
			 * NOTE(review): dv.group != 0 means a non-root
			 * table, so this bit is set for NON-root tables
			 * despite the field name "is_root" - the naming
			 * looks inverted; the resulting key is still
			 * consistent for all users of this function.
			 */
			.is_root = !!dev_flow->dv.group,
			.reserve = 0,
		}
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};
	uint64_t key64;

	/* flags == 1 marks a root-table (group 0) resource. */
	resource->flags = dev_flow->dv.group ? 0 : 1;
	key64 =  __rte_raw_cksum(&encap_decap_key.v32,
				 sizeof(encap_decap_key.v32), 0);
	/*
	 * Mix the reformat buffer into the key, except for the
	 * L2-tunnel-to-L2 decap type whose buffer is not part of the
	 * action identity.
	 */
	if (resource->reformat_type !=
	    MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
	    resource->size)
		key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
	/* Lookup or create via the shared hash list. */
	entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
	if (!entry)
		return -rte_errno;
	/* Point the flow at the (possibly pre-existing) shared entry. */
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->dv.encap_decap = resource;
	dev_flow->handle->dvh.rix_encap_decap = resource->idx;
	return 0;
}
3745
3746 /**
3747  * Find existing table jump resource or create and register a new one.
3748  *
3749  * @param[in, out] dev
3750  *   Pointer to rte_eth_dev structure.
3751  * @param[in, out] tbl
3752  *   Pointer to flow table resource.
3753  * @parm[in, out] dev_flow
3754  *   Pointer to the dev_flow.
3755  * @param[out] error
3756  *   pointer to error structure.
3757  *
3758  * @return
3759  *   0 on success otherwise -errno and errno is set.
3760  */
3761 static int
3762 flow_dv_jump_tbl_resource_register
3763                         (struct rte_eth_dev *dev __rte_unused,
3764                          struct mlx5_flow_tbl_resource *tbl,
3765                          struct mlx5_flow *dev_flow,
3766                          struct rte_flow_error *error __rte_unused)
3767 {
3768         struct mlx5_flow_tbl_data_entry *tbl_data =
3769                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3770
3771         MLX5_ASSERT(tbl);
3772         MLX5_ASSERT(tbl_data->jump.action);
3773         dev_flow->handle->rix_jump = tbl_data->idx;
3774         dev_flow->dv.jump = &tbl_data->jump;
3775         return 0;
3776 }
3777
3778 int
3779 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3780                          struct mlx5_cache_entry *entry, void *cb_ctx)
3781 {
3782         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3783         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3784         struct mlx5_flow_dv_port_id_action_resource *res =
3785                         container_of(entry, typeof(*res), entry);
3786
3787         return ref->port_id != res->port_id;
3788 }
3789
3790 struct mlx5_cache_entry *
3791 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3792                           struct mlx5_cache_entry *entry __rte_unused,
3793                           void *cb_ctx)
3794 {
3795         struct mlx5_dev_ctx_shared *sh = list->ctx;
3796         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3797         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3798         struct mlx5_flow_dv_port_id_action_resource *cache;
3799         uint32_t idx;
3800         int ret;
3801
3802         /* Register new port id action resource. */
3803         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3804         if (!cache) {
3805                 rte_flow_error_set(ctx->error, ENOMEM,
3806                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3807                                    "cannot allocate port_id action cache memory");
3808                 return NULL;
3809         }
3810         *cache = *ref;
3811         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3812                                                         ref->port_id,
3813                                                         &cache->action);
3814         if (ret) {
3815                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3816                 rte_flow_error_set(ctx->error, ENOMEM,
3817                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3818                                    "cannot create action");
3819                 return NULL;
3820         }
3821         cache->idx = idx;
3822         return &cache->entry;
3823 }
3824
3825 /**
3826  * Find existing table port ID resource or create and register a new one.
3827  *
3828  * @param[in, out] dev
3829  *   Pointer to rte_eth_dev structure.
3830  * @param[in, out] resource
3831  *   Pointer to port ID action resource.
3832  * @parm[in, out] dev_flow
3833  *   Pointer to the dev_flow.
3834  * @param[out] error
3835  *   pointer to error structure.
3836  *
3837  * @return
3838  *   0 on success otherwise -errno and errno is set.
3839  */
3840 static int
3841 flow_dv_port_id_action_resource_register
3842                         (struct rte_eth_dev *dev,
3843                          struct mlx5_flow_dv_port_id_action_resource *resource,
3844                          struct mlx5_flow *dev_flow,
3845                          struct rte_flow_error *error)
3846 {
3847         struct mlx5_priv *priv = dev->data->dev_private;
3848         struct mlx5_cache_entry *entry;
3849         struct mlx5_flow_dv_port_id_action_resource *cache;
3850         struct mlx5_flow_cb_ctx ctx = {
3851                 .error = error,
3852                 .data = resource,
3853         };
3854
3855         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3856         if (!entry)
3857                 return -rte_errno;
3858         cache = container_of(entry, typeof(*cache), entry);
3859         dev_flow->dv.port_id_action = cache;
3860         dev_flow->handle->rix_port_id_action = cache->idx;
3861         return 0;
3862 }
3863
3864 int
3865 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3866                          struct mlx5_cache_entry *entry, void *cb_ctx)
3867 {
3868         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3869         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3870         struct mlx5_flow_dv_push_vlan_action_resource *res =
3871                         container_of(entry, typeof(*res), entry);
3872
3873         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3874 }
3875
/**
 * Allocate and register a new push VLAN action resource.
 *
 * Cache list create callback, invoked when no existing entry matched.
 *
 * @param list
 *   Pointer to the cache list; its ctx is the shared device context.
 * @param entry
 *   Existing entry (unused for creation).
 * @param cb_ctx
 *   Pointer to a mlx5_flow_cb_ctx holding the resource template and the
 *   error structure to fill on failure.
 *
 * @return
 *   Pointer to the new cache entry on success, NULL otherwise with the
 *   rte_flow error set.
 */
struct mlx5_cache_entry *
flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
			  struct mlx5_cache_entry *entry __rte_unused,
			  void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
	struct mlx5_flow_dv_push_vlan_action_resource *cache;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx;
	int ret;

	/* Register new push VLAN action resource. */
	cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
	if (!cache) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate push_vlan action cache memory");
		return NULL;
	}
	*cache = *ref;
	/* Select the DR domain matching the reference's flow table type. */
	if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
							&cache->action);
	if (ret) {
		/* Action creation failed: return the pool entry and report. */
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create push vlan action");
		return NULL;
	}
	cache->idx = idx;
	return &cache->entry;
}
3916
3917 /**
3918  * Find existing push vlan resource or create and register a new one.
3919  *
3920  * @param [in, out] dev
3921  *   Pointer to rte_eth_dev structure.
3922  * @param[in, out] resource
3923  *   Pointer to port ID action resource.
3924  * @parm[in, out] dev_flow
3925  *   Pointer to the dev_flow.
3926  * @param[out] error
3927  *   pointer to error structure.
3928  *
3929  * @return
3930  *   0 on success otherwise -errno and errno is set.
3931  */
3932 static int
3933 flow_dv_push_vlan_action_resource_register
3934                        (struct rte_eth_dev *dev,
3935                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3936                         struct mlx5_flow *dev_flow,
3937                         struct rte_flow_error *error)
3938 {
3939         struct mlx5_priv *priv = dev->data->dev_private;
3940         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3941         struct mlx5_cache_entry *entry;
3942         struct mlx5_flow_cb_ctx ctx = {
3943                 .error = error,
3944                 .data = resource,
3945         };
3946
3947         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3948         if (!entry)
3949                 return -rte_errno;
3950         cache = container_of(entry, typeof(*cache), entry);
3951
3952         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3953         dev_flow->dv.push_vlan_res = cache;
3954         return 0;
3955 }
3956
3957 /**
3958  * Get the size of specific rte_flow_item_type hdr size
3959  *
3960  * @param[in] item_type
3961  *   Tested rte_flow_item_type.
3962  *
3963  * @return
3964  *   sizeof struct item_type, 0 if void or irrelevant.
3965  */
3966 static size_t
3967 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3968 {
3969         size_t retval;
3970
3971         switch (item_type) {
3972         case RTE_FLOW_ITEM_TYPE_ETH:
3973                 retval = sizeof(struct rte_ether_hdr);
3974                 break;
3975         case RTE_FLOW_ITEM_TYPE_VLAN:
3976                 retval = sizeof(struct rte_vlan_hdr);
3977                 break;
3978         case RTE_FLOW_ITEM_TYPE_IPV4:
3979                 retval = sizeof(struct rte_ipv4_hdr);
3980                 break;
3981         case RTE_FLOW_ITEM_TYPE_IPV6:
3982                 retval = sizeof(struct rte_ipv6_hdr);
3983                 break;
3984         case RTE_FLOW_ITEM_TYPE_UDP:
3985                 retval = sizeof(struct rte_udp_hdr);
3986                 break;
3987         case RTE_FLOW_ITEM_TYPE_TCP:
3988                 retval = sizeof(struct rte_tcp_hdr);
3989                 break;
3990         case RTE_FLOW_ITEM_TYPE_VXLAN:
3991         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3992                 retval = sizeof(struct rte_vxlan_hdr);
3993                 break;
3994         case RTE_FLOW_ITEM_TYPE_GRE:
3995         case RTE_FLOW_ITEM_TYPE_NVGRE:
3996                 retval = sizeof(struct rte_gre_hdr);
3997                 break;
3998         case RTE_FLOW_ITEM_TYPE_MPLS:
3999                 retval = sizeof(struct rte_mpls_hdr);
4000                 break;
4001         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4002         default:
4003                 retval = 0;
4004                 break;
4005         }
4006         return retval;
4007 }
4008
4009 #define MLX5_ENCAP_IPV4_VERSION         0x40
4010 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4011 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4012 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4013 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4014 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4015 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4016
4017 /**
4018  * Convert the encap action data from list of rte_flow_item to raw buffer
4019  *
4020  * @param[in] items
4021  *   Pointer to rte_flow_item objects list.
4022  * @param[out] buf
4023  *   Pointer to the output buffer.
4024  * @param[out] size
4025  *   Pointer to the output buffer size.
4026  * @param[out] error
4027  *   Pointer to the error structure.
4028  *
4029  * @return
4030  *   0 on success, a negative errno value otherwise and rte_errno is set.
4031  */
4032 static int
4033 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4034                            size_t *size, struct rte_flow_error *error)
4035 {
4036         struct rte_ether_hdr *eth = NULL;
4037         struct rte_vlan_hdr *vlan = NULL;
4038         struct rte_ipv4_hdr *ipv4 = NULL;
4039         struct rte_ipv6_hdr *ipv6 = NULL;
4040         struct rte_udp_hdr *udp = NULL;
4041         struct rte_vxlan_hdr *vxlan = NULL;
4042         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4043         struct rte_gre_hdr *gre = NULL;
4044         size_t len;
4045         size_t temp_size = 0;
4046
4047         if (!items)
4048                 return rte_flow_error_set(error, EINVAL,
4049                                           RTE_FLOW_ERROR_TYPE_ACTION,
4050                                           NULL, "invalid empty data");
4051         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4052                 len = flow_dv_get_item_hdr_len(items->type);
4053                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4054                         return rte_flow_error_set(error, EINVAL,
4055                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4056                                                   (void *)items->type,
4057                                                   "items total size is too big"
4058                                                   " for encap action");
4059                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4060                 switch (items->type) {
4061                 case RTE_FLOW_ITEM_TYPE_ETH:
4062                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4063                         break;
4064                 case RTE_FLOW_ITEM_TYPE_VLAN:
4065                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4066                         if (!eth)
4067                                 return rte_flow_error_set(error, EINVAL,
4068                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4069                                                 (void *)items->type,
4070                                                 "eth header not found");
4071                         if (!eth->ether_type)
4072                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4073                         break;
4074                 case RTE_FLOW_ITEM_TYPE_IPV4:
4075                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4076                         if (!vlan && !eth)
4077                                 return rte_flow_error_set(error, EINVAL,
4078                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4079                                                 (void *)items->type,
4080                                                 "neither eth nor vlan"
4081                                                 " header found");
4082                         if (vlan && !vlan->eth_proto)
4083                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4084                         else if (eth && !eth->ether_type)
4085                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4086                         if (!ipv4->version_ihl)
4087                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4088                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4089                         if (!ipv4->time_to_live)
4090                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4091                         break;
4092                 case RTE_FLOW_ITEM_TYPE_IPV6:
4093                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4094                         if (!vlan && !eth)
4095                                 return rte_flow_error_set(error, EINVAL,
4096                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4097                                                 (void *)items->type,
4098                                                 "neither eth nor vlan"
4099                                                 " header found");
4100                         if (vlan && !vlan->eth_proto)
4101                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4102                         else if (eth && !eth->ether_type)
4103                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4104                         if (!ipv6->vtc_flow)
4105                                 ipv6->vtc_flow =
4106                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4107                         if (!ipv6->hop_limits)
4108                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4109                         break;
4110                 case RTE_FLOW_ITEM_TYPE_UDP:
4111                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4112                         if (!ipv4 && !ipv6)
4113                                 return rte_flow_error_set(error, EINVAL,
4114                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4115                                                 (void *)items->type,
4116                                                 "ip header not found");
4117                         if (ipv4 && !ipv4->next_proto_id)
4118                                 ipv4->next_proto_id = IPPROTO_UDP;
4119                         else if (ipv6 && !ipv6->proto)
4120                                 ipv6->proto = IPPROTO_UDP;
4121                         break;
4122                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4123                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4124                         if (!udp)
4125                                 return rte_flow_error_set(error, EINVAL,
4126                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4127                                                 (void *)items->type,
4128                                                 "udp header not found");
4129                         if (!udp->dst_port)
4130                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4131                         if (!vxlan->vx_flags)
4132                                 vxlan->vx_flags =
4133                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4134                         break;
4135                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4136                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4137                         if (!udp)
4138                                 return rte_flow_error_set(error, EINVAL,
4139                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4140                                                 (void *)items->type,
4141                                                 "udp header not found");
4142                         if (!vxlan_gpe->proto)
4143                                 return rte_flow_error_set(error, EINVAL,
4144                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4145                                                 (void *)items->type,
4146                                                 "next protocol not found");
4147                         if (!udp->dst_port)
4148                                 udp->dst_port =
4149                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4150                         if (!vxlan_gpe->vx_flags)
4151                                 vxlan_gpe->vx_flags =
4152                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4153                         break;
4154                 case RTE_FLOW_ITEM_TYPE_GRE:
4155                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4156                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4157                         if (!gre->proto)
4158                                 return rte_flow_error_set(error, EINVAL,
4159                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4160                                                 (void *)items->type,
4161                                                 "next protocol not found");
4162                         if (!ipv4 && !ipv6)
4163                                 return rte_flow_error_set(error, EINVAL,
4164                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4165                                                 (void *)items->type,
4166                                                 "ip header not found");
4167                         if (ipv4 && !ipv4->next_proto_id)
4168                                 ipv4->next_proto_id = IPPROTO_GRE;
4169                         else if (ipv6 && !ipv6->proto)
4170                                 ipv6->proto = IPPROTO_GRE;
4171                         break;
4172                 case RTE_FLOW_ITEM_TYPE_VOID:
4173                         break;
4174                 default:
4175                         return rte_flow_error_set(error, EINVAL,
4176                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4177                                                   (void *)items->type,
4178                                                   "unsupported item type");
4179                         break;
4180                 }
4181                 temp_size += len;
4182         }
4183         *size = temp_size;
4184         return 0;
4185 }
4186
static int
flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
{
        struct rte_ether_hdr *eth = NULL;
        struct rte_vlan_hdr *vlan = NULL;
        struct rte_ipv6_hdr *ipv6 = NULL;
        struct rte_udp_hdr *udp = NULL;
        char *next_hdr;
        uint16_t proto;

        eth = (struct rte_ether_hdr *)data;
        next_hdr = (char *)(eth + 1);
        proto = RTE_BE16(eth->ether_type);

        /* VLAN skipping */
        while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
                vlan = (struct rte_vlan_hdr *)next_hdr;
                proto = RTE_BE16(vlan->eth_proto);
                next_hdr += sizeof(struct rte_vlan_hdr);
        }

        /* HW calculates IPv4 csum. no need to proceed */
        if (proto == RTE_ETHER_TYPE_IPV4)
                return 0;

        /* non IPv4/IPv6 header. not supported */
        if (proto != RTE_ETHER_TYPE_IPV6) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "Cannot offload non IPv4/IPv6");
        }

        ipv6 = (struct rte_ipv6_hdr *)next_hdr;

        /* ignore non UDP */
        /* NOTE(review): IPv6 extension headers are not walked here; a UDP
         * payload behind an extension header is silently left untouched —
         * presumably acceptable for encap headers built by this PMD; confirm.
         */
        if (ipv6->proto != IPPROTO_UDP)
                return 0;

        /* Zero the UDP checksum of the encap header template so the
         * (unsupported) inner-over-IPv6 UDP checksum offload is not required.
         */
        udp = (struct rte_udp_hdr *)(ipv6 + 1);
        udp->dgram_cksum = 0;

        return 0;
}
4230
4231 /**
4232  * Convert L2 encap action to DV specification.
4233  *
4234  * @param[in] dev
4235  *   Pointer to rte_eth_dev structure.
4236  * @param[in] action
4237  *   Pointer to action structure.
4238  * @param[in, out] dev_flow
4239  *   Pointer to the mlx5_flow.
4240  * @param[in] transfer
4241  *   Mark if the flow is E-Switch flow.
4242  * @param[out] error
4243  *   Pointer to the error structure.
4244  *
4245  * @return
4246  *   0 on success, a negative errno value otherwise and rte_errno is set.
4247  */
4248 static int
4249 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4250                                const struct rte_flow_action *action,
4251                                struct mlx5_flow *dev_flow,
4252                                uint8_t transfer,
4253                                struct rte_flow_error *error)
4254 {
4255         const struct rte_flow_item *encap_data;
4256         const struct rte_flow_action_raw_encap *raw_encap_data;
4257         struct mlx5_flow_dv_encap_decap_resource res = {
4258                 .reformat_type =
4259                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4260                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4261                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4262         };
4263
4264         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4265                 raw_encap_data =
4266                         (const struct rte_flow_action_raw_encap *)action->conf;
4267                 res.size = raw_encap_data->size;
4268                 memcpy(res.buf, raw_encap_data->data, res.size);
4269         } else {
4270                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4271                         encap_data =
4272                                 ((const struct rte_flow_action_vxlan_encap *)
4273                                                 action->conf)->definition;
4274                 else
4275                         encap_data =
4276                                 ((const struct rte_flow_action_nvgre_encap *)
4277                                                 action->conf)->definition;
4278                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4279                                                &res.size, error))
4280                         return -rte_errno;
4281         }
4282         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4283                 return -rte_errno;
4284         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4285                 return rte_flow_error_set(error, EINVAL,
4286                                           RTE_FLOW_ERROR_TYPE_ACTION,
4287                                           NULL, "can't create L2 encap action");
4288         return 0;
4289 }
4290
4291 /**
4292  * Convert L2 decap action to DV specification.
4293  *
4294  * @param[in] dev
4295  *   Pointer to rte_eth_dev structure.
4296  * @param[in, out] dev_flow
4297  *   Pointer to the mlx5_flow.
4298  * @param[in] transfer
4299  *   Mark if the flow is E-Switch flow.
4300  * @param[out] error
4301  *   Pointer to the error structure.
4302  *
4303  * @return
4304  *   0 on success, a negative errno value otherwise and rte_errno is set.
4305  */
4306 static int
4307 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4308                                struct mlx5_flow *dev_flow,
4309                                uint8_t transfer,
4310                                struct rte_flow_error *error)
4311 {
4312         struct mlx5_flow_dv_encap_decap_resource res = {
4313                 .size = 0,
4314                 .reformat_type =
4315                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4316                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4317                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4318         };
4319
4320         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4321                 return rte_flow_error_set(error, EINVAL,
4322                                           RTE_FLOW_ERROR_TYPE_ACTION,
4323                                           NULL, "can't create L2 decap action");
4324         return 0;
4325 }
4326
4327 /**
4328  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4329  *
4330  * @param[in] dev
4331  *   Pointer to rte_eth_dev structure.
4332  * @param[in] action
4333  *   Pointer to action structure.
4334  * @param[in, out] dev_flow
4335  *   Pointer to the mlx5_flow.
4336  * @param[in] attr
4337  *   Pointer to the flow attributes.
4338  * @param[out] error
4339  *   Pointer to the error structure.
4340  *
4341  * @return
4342  *   0 on success, a negative errno value otherwise and rte_errno is set.
4343  */
4344 static int
4345 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4346                                 const struct rte_flow_action *action,
4347                                 struct mlx5_flow *dev_flow,
4348                                 const struct rte_flow_attr *attr,
4349                                 struct rte_flow_error *error)
4350 {
4351         const struct rte_flow_action_raw_encap *encap_data;
4352         struct mlx5_flow_dv_encap_decap_resource res;
4353
4354         memset(&res, 0, sizeof(res));
4355         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4356         res.size = encap_data->size;
4357         memcpy(res.buf, encap_data->data, res.size);
4358         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4359                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4360                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4361         if (attr->transfer)
4362                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4363         else
4364                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4365                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4366         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4367                 return rte_flow_error_set(error, EINVAL,
4368                                           RTE_FLOW_ERROR_TYPE_ACTION,
4369                                           NULL, "can't create encap action");
4370         return 0;
4371 }
4372
4373 /**
4374  * Create action push VLAN.
4375  *
4376  * @param[in] dev
4377  *   Pointer to rte_eth_dev structure.
4378  * @param[in] attr
4379  *   Pointer to the flow attributes.
4380  * @param[in] vlan
4381  *   Pointer to the vlan to push to the Ethernet header.
4382  * @param[in, out] dev_flow
4383  *   Pointer to the mlx5_flow.
4384  * @param[out] error
4385  *   Pointer to the error structure.
4386  *
4387  * @return
4388  *   0 on success, a negative errno value otherwise and rte_errno is set.
4389  */
4390 static int
4391 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4392                                 const struct rte_flow_attr *attr,
4393                                 const struct rte_vlan_hdr *vlan,
4394                                 struct mlx5_flow *dev_flow,
4395                                 struct rte_flow_error *error)
4396 {
4397         struct mlx5_flow_dv_push_vlan_action_resource res;
4398
4399         memset(&res, 0, sizeof(res));
4400         res.vlan_tag =
4401                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4402                                  vlan->vlan_tci);
4403         if (attr->transfer)
4404                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4405         else
4406                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4407                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4408         return flow_dv_push_vlan_action_resource_register
4409                                             (dev, &res, dev_flow, error);
4410 }
4411
4412 /**
4413  * Validate the modify-header actions.
4414  *
4415  * @param[in] action_flags
4416  *   Holds the actions detected until now.
4417  * @param[in] action
4418  *   Pointer to the modify action.
4419  * @param[out] error
4420  *   Pointer to error structure.
4421  *
4422  * @return
4423  *   0 on success, a negative errno value otherwise and rte_errno is set.
4424  */
4425 static int
4426 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4427                                    const struct rte_flow_action *action,
4428                                    struct rte_flow_error *error)
4429 {
4430         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4431                 return rte_flow_error_set(error, EINVAL,
4432                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4433                                           NULL, "action configuration not set");
4434         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4435                 return rte_flow_error_set(error, EINVAL,
4436                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4437                                           "can't have encap action before"
4438                                           " modify action");
4439         return 0;
4440 }
4441
4442 /**
4443  * Validate the modify-header MAC address actions.
4444  *
4445  * @param[in] action_flags
4446  *   Holds the actions detected until now.
4447  * @param[in] action
4448  *   Pointer to the modify action.
4449  * @param[in] item_flags
4450  *   Holds the items detected.
4451  * @param[out] error
4452  *   Pointer to error structure.
4453  *
4454  * @return
4455  *   0 on success, a negative errno value otherwise and rte_errno is set.
4456  */
4457 static int
4458 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4459                                    const struct rte_flow_action *action,
4460                                    const uint64_t item_flags,
4461                                    struct rte_flow_error *error)
4462 {
4463         int ret = 0;
4464
4465         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4466         if (!ret) {
4467                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4468                         return rte_flow_error_set(error, EINVAL,
4469                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4470                                                   NULL,
4471                                                   "no L2 item in pattern");
4472         }
4473         return ret;
4474 }
4475
4476 /**
4477  * Validate the modify-header IPv4 address actions.
4478  *
4479  * @param[in] action_flags
4480  *   Holds the actions detected until now.
4481  * @param[in] action
4482  *   Pointer to the modify action.
4483  * @param[in] item_flags
4484  *   Holds the items detected.
4485  * @param[out] error
4486  *   Pointer to error structure.
4487  *
4488  * @return
4489  *   0 on success, a negative errno value otherwise and rte_errno is set.
4490  */
4491 static int
4492 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4493                                     const struct rte_flow_action *action,
4494                                     const uint64_t item_flags,
4495                                     struct rte_flow_error *error)
4496 {
4497         int ret = 0;
4498         uint64_t layer;
4499
4500         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4501         if (!ret) {
4502                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4503                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4504                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4505                 if (!(item_flags & layer))
4506                         return rte_flow_error_set(error, EINVAL,
4507                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4508                                                   NULL,
4509                                                   "no ipv4 item in pattern");
4510         }
4511         return ret;
4512 }
4513
4514 /**
4515  * Validate the modify-header IPv6 address actions.
4516  *
4517  * @param[in] action_flags
4518  *   Holds the actions detected until now.
4519  * @param[in] action
4520  *   Pointer to the modify action.
4521  * @param[in] item_flags
4522  *   Holds the items detected.
4523  * @param[out] error
4524  *   Pointer to error structure.
4525  *
4526  * @return
4527  *   0 on success, a negative errno value otherwise and rte_errno is set.
4528  */
4529 static int
4530 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4531                                     const struct rte_flow_action *action,
4532                                     const uint64_t item_flags,
4533                                     struct rte_flow_error *error)
4534 {
4535         int ret = 0;
4536         uint64_t layer;
4537
4538         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4539         if (!ret) {
4540                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4541                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4542                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4543                 if (!(item_flags & layer))
4544                         return rte_flow_error_set(error, EINVAL,
4545                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4546                                                   NULL,
4547                                                   "no ipv6 item in pattern");
4548         }
4549         return ret;
4550 }
4551
4552 /**
4553  * Validate the modify-header TP actions.
4554  *
4555  * @param[in] action_flags
4556  *   Holds the actions detected until now.
4557  * @param[in] action
4558  *   Pointer to the modify action.
4559  * @param[in] item_flags
4560  *   Holds the items detected.
4561  * @param[out] error
4562  *   Pointer to error structure.
4563  *
4564  * @return
4565  *   0 on success, a negative errno value otherwise and rte_errno is set.
4566  */
4567 static int
4568 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4569                                   const struct rte_flow_action *action,
4570                                   const uint64_t item_flags,
4571                                   struct rte_flow_error *error)
4572 {
4573         int ret = 0;
4574         uint64_t layer;
4575
4576         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4577         if (!ret) {
4578                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4579                                  MLX5_FLOW_LAYER_INNER_L4 :
4580                                  MLX5_FLOW_LAYER_OUTER_L4;
4581                 if (!(item_flags & layer))
4582                         return rte_flow_error_set(error, EINVAL,
4583                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4584                                                   NULL, "no transport layer "
4585                                                   "in pattern");
4586         }
4587         return ret;
4588 }
4589
4590 /**
4591  * Validate the modify-header actions of increment/decrement
4592  * TCP Sequence-number.
4593  *
4594  * @param[in] action_flags
4595  *   Holds the actions detected until now.
4596  * @param[in] action
4597  *   Pointer to the modify action.
4598  * @param[in] item_flags
4599  *   Holds the items detected.
4600  * @param[out] error
4601  *   Pointer to error structure.
4602  *
4603  * @return
4604  *   0 on success, a negative errno value otherwise and rte_errno is set.
4605  */
4606 static int
4607 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4608                                        const struct rte_flow_action *action,
4609                                        const uint64_t item_flags,
4610                                        struct rte_flow_error *error)
4611 {
4612         int ret = 0;
4613         uint64_t layer;
4614
4615         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4616         if (!ret) {
4617                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4618                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4619                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4620                 if (!(item_flags & layer))
4621                         return rte_flow_error_set(error, EINVAL,
4622                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4623                                                   NULL, "no TCP item in"
4624                                                   " pattern");
4625                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4626                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4627                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4628                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4629                         return rte_flow_error_set(error, EINVAL,
4630                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4631                                                   NULL,
4632                                                   "cannot decrease and increase"
4633                                                   " TCP sequence number"
4634                                                   " at the same time");
4635         }
4636         return ret;
4637 }
4638
4639 /**
4640  * Validate the modify-header actions of increment/decrement
4641  * TCP Acknowledgment number.
4642  *
4643  * @param[in] action_flags
4644  *   Holds the actions detected until now.
4645  * @param[in] action
4646  *   Pointer to the modify action.
4647  * @param[in] item_flags
4648  *   Holds the items detected.
4649  * @param[out] error
4650  *   Pointer to error structure.
4651  *
4652  * @return
4653  *   0 on success, a negative errno value otherwise and rte_errno is set.
4654  */
4655 static int
4656 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4657                                        const struct rte_flow_action *action,
4658                                        const uint64_t item_flags,
4659                                        struct rte_flow_error *error)
4660 {
4661         int ret = 0;
4662         uint64_t layer;
4663
4664         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4665         if (!ret) {
4666                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4667                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4668                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4669                 if (!(item_flags & layer))
4670                         return rte_flow_error_set(error, EINVAL,
4671                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4672                                                   NULL, "no TCP item in"
4673                                                   " pattern");
4674                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4675                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4676                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4677                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4678                         return rte_flow_error_set(error, EINVAL,
4679                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4680                                                   NULL,
4681                                                   "cannot decrease and increase"
4682                                                   " TCP acknowledgment number"
4683                                                   " at the same time");
4684         }
4685         return ret;
4686 }
4687
4688 /**
4689  * Validate the modify-header TTL actions.
4690  *
4691  * @param[in] action_flags
4692  *   Holds the actions detected until now.
4693  * @param[in] action
4694  *   Pointer to the modify action.
4695  * @param[in] item_flags
4696  *   Holds the items detected.
4697  * @param[out] error
4698  *   Pointer to error structure.
4699  *
4700  * @return
4701  *   0 on success, a negative errno value otherwise and rte_errno is set.
4702  */
4703 static int
4704 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4705                                    const struct rte_flow_action *action,
4706                                    const uint64_t item_flags,
4707                                    struct rte_flow_error *error)
4708 {
4709         int ret = 0;
4710         uint64_t layer;
4711
4712         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4713         if (!ret) {
4714                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4715                                  MLX5_FLOW_LAYER_INNER_L3 :
4716                                  MLX5_FLOW_LAYER_OUTER_L3;
4717                 if (!(item_flags & layer))
4718                         return rte_flow_error_set(error, EINVAL,
4719                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4720                                                   NULL,
4721                                                   "no IP protocol in pattern");
4722         }
4723         return ret;
4724 }
4725
4726 /**
4727  * Validate the generic modify field actions.
4728  * @param[in] dev
4729  *   Pointer to the rte_eth_dev structure.
4730  * @param[in] action_flags
4731  *   Holds the actions detected until now.
4732  * @param[in] action
4733  *   Pointer to the modify action.
4734  * @param[in] attr
4735  *   Pointer to the flow attributes.
4736  * @param[out] error
4737  *   Pointer to error structure.
4738  *
4739  * @return
4740  *   Number of header fields to modify (0 or more) on success,
4741  *   a negative errno value otherwise and rte_errno is set.
4742  */
4743 static int
4744 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4745                                    const uint64_t action_flags,
4746                                    const struct rte_flow_action *action,
4747                                    const struct rte_flow_attr *attr,
4748                                    struct rte_flow_error *error)
4749 {
4750         int ret = 0;
4751         struct mlx5_priv *priv = dev->data->dev_private;
4752         struct mlx5_dev_config *config = &priv->config;
4753         const struct rte_flow_action_modify_field *action_modify_field =
4754                 action->conf;
4755         uint32_t dst_width = mlx5_flow_item_field_width(config,
4756                                 action_modify_field->dst.field);
4757         uint32_t src_width = mlx5_flow_item_field_width(config,
4758                                 action_modify_field->src.field);
4759
4760         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4761         if (ret)
4762                 return ret;
4763
4764         if (action_modify_field->width == 0)
4765                 return rte_flow_error_set(error, EINVAL,
4766                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4767                                 "no bits are requested to be modified");
4768         else if (action_modify_field->width > dst_width ||
4769                  action_modify_field->width > src_width)
4770                 return rte_flow_error_set(error, EINVAL,
4771                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4772                                 "cannot modify more bits than"
4773                                 " the width of a field");
4774         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4775             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4776                 if ((action_modify_field->dst.offset +
4777                      action_modify_field->width > dst_width) ||
4778                     (action_modify_field->dst.offset % 32))
4779                         return rte_flow_error_set(error, EINVAL,
4780                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4781                                         "destination offset is too big"
4782                                         " or not aligned to 4 bytes");
4783                 if (action_modify_field->dst.level &&
4784                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4785                         return rte_flow_error_set(error, ENOTSUP,
4786                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4787                                         "inner header fields modification"
4788                                         " is not supported");
4789         }
4790         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4791             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4792                 if (!attr->transfer && !attr->group)
4793                         return rte_flow_error_set(error, ENOTSUP,
4794                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4795                                         "modify field action is not"
4796                                         " supported for group 0");
4797                 if ((action_modify_field->src.offset +
4798                      action_modify_field->width > src_width) ||
4799                     (action_modify_field->src.offset % 32))
4800                         return rte_flow_error_set(error, EINVAL,
4801                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4802                                         "source offset is too big"
4803                                         " or not aligned to 4 bytes");
4804                 if (action_modify_field->src.level &&
4805                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4806                         return rte_flow_error_set(error, ENOTSUP,
4807                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4808                                         "inner header fields modification"
4809                                         " is not supported");
4810         }
4811         if ((action_modify_field->dst.field ==
4812              action_modify_field->src.field) &&
4813             (action_modify_field->dst.level ==
4814              action_modify_field->src.level))
4815                 return rte_flow_error_set(error, EINVAL,
4816                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4817                                 "source and destination fields"
4818                                 " cannot be the same");
4819         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4820             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4821                 return rte_flow_error_set(error, EINVAL,
4822                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4823                                 "immediate value or a pointer to it"
4824                                 " cannot be used as a destination");
4825         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4826             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4827                 return rte_flow_error_set(error, ENOTSUP,
4828                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4829                                 "modifications of an arbitrary"
4830                                 " place in a packet is not supported");
4831         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4832             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4833                 return rte_flow_error_set(error, ENOTSUP,
4834                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4835                                 "modifications of the 802.1Q Tag"
4836                                 " Identifier is not supported");
4837         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4838             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4839                 return rte_flow_error_set(error, ENOTSUP,
4840                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4841                                 "modifications of the VXLAN Network"
4842                                 " Identifier is not supported");
4843         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4844             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4845                 return rte_flow_error_set(error, ENOTSUP,
4846                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4847                                 "modifications of the GENEVE Network"
4848                                 " Identifier is not supported");
4849         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4850             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4851             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4852             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4853                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4854                     !mlx5_flow_ext_mreg_supported(dev))
4855                         return rte_flow_error_set(error, ENOTSUP,
4856                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4857                                         "cannot modify mark or metadata without"
4858                                         " extended metadata register support");
4859         }
4860         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4861                 return rte_flow_error_set(error, ENOTSUP,
4862                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4863                                 "add and sub operations"
4864                                 " are not supported");
4865         return (action_modify_field->width / 32) +
4866                !!(action_modify_field->width % 32);
4867 }
4868
4869 /**
4870  * Validate jump action.
4871  *
4872  * @param[in] action
4873  *   Pointer to the jump action.
4874  * @param[in] action_flags
4875  *   Holds the actions detected until now.
4876  * @param[in] attributes
4877  *   Pointer to flow attributes
4878  * @param[in] external
4879  *   Action belongs to flow rule created by request external to PMD.
4880  * @param[out] error
4881  *   Pointer to error structure.
4882  *
4883  * @return
4884  *   0 on success, a negative errno value otherwise and rte_errno is set.
4885  */
4886 static int
4887 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4888                              const struct mlx5_flow_tunnel *tunnel,
4889                              const struct rte_flow_action *action,
4890                              uint64_t action_flags,
4891                              const struct rte_flow_attr *attributes,
4892                              bool external, struct rte_flow_error *error)
4893 {
4894         uint32_t target_group, table;
4895         int ret = 0;
4896         struct flow_grp_info grp_info = {
4897                 .external = !!external,
4898                 .transfer = !!attributes->transfer,
4899                 .fdb_def_rule = 1,
4900                 .std_tbl_fix = 0
4901         };
4902         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4903                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4904                 return rte_flow_error_set(error, EINVAL,
4905                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4906                                           "can't have 2 fate actions in"
4907                                           " same flow");
4908         if (!action->conf)
4909                 return rte_flow_error_set(error, EINVAL,
4910                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4911                                           NULL, "action configuration not set");
4912         target_group =
4913                 ((const struct rte_flow_action_jump *)action->conf)->group;
4914         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4915                                        &grp_info, error);
4916         if (ret)
4917                 return ret;
4918         if (attributes->group == target_group &&
4919             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4920                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4921                 return rte_flow_error_set(error, EINVAL,
4922                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4923                                           "target group must be other than"
4924                                           " the current flow group");
4925         return 0;
4926 }
4927
4928 /*
4929  * Validate the port_id action.
4930  *
4931  * @param[in] dev
4932  *   Pointer to rte_eth_dev structure.
4933  * @param[in] action_flags
4934  *   Bit-fields that holds the actions detected until now.
4935  * @param[in] action
4936  *   Port_id RTE action structure.
4937  * @param[in] attr
4938  *   Attributes of flow that includes this action.
4939  * @param[out] error
4940  *   Pointer to error structure.
4941  *
4942  * @return
4943  *   0 on success, a negative errno value otherwise and rte_errno is set.
4944  */
4945 static int
4946 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4947                                 uint64_t action_flags,
4948                                 const struct rte_flow_action *action,
4949                                 const struct rte_flow_attr *attr,
4950                                 struct rte_flow_error *error)
4951 {
4952         const struct rte_flow_action_port_id *port_id;
4953         struct mlx5_priv *act_priv;
4954         struct mlx5_priv *dev_priv;
4955         uint16_t port;
4956
4957         if (!attr->transfer)
4958                 return rte_flow_error_set(error, ENOTSUP,
4959                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4960                                           NULL,
4961                                           "port id action is valid in transfer"
4962                                           " mode only");
4963         if (!action || !action->conf)
4964                 return rte_flow_error_set(error, ENOTSUP,
4965                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4966                                           NULL,
4967                                           "port id action parameters must be"
4968                                           " specified");
4969         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4970                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4971                 return rte_flow_error_set(error, EINVAL,
4972                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4973                                           "can have only one fate actions in"
4974                                           " a flow");
4975         dev_priv = mlx5_dev_to_eswitch_info(dev);
4976         if (!dev_priv)
4977                 return rte_flow_error_set(error, rte_errno,
4978                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4979                                           NULL,
4980                                           "failed to obtain E-Switch info");
4981         port_id = action->conf;
4982         port = port_id->original ? dev->data->port_id : port_id->id;
4983         act_priv = mlx5_port_to_eswitch_info(port, false);
4984         if (!act_priv)
4985                 return rte_flow_error_set
4986                                 (error, rte_errno,
4987                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4988                                  "failed to obtain E-Switch port id for port");
4989         if (act_priv->domain_id != dev_priv->domain_id)
4990                 return rte_flow_error_set
4991                                 (error, EINVAL,
4992                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4993                                  "port does not belong to"
4994                                  " E-Switch being configured");
4995         return 0;
4996 }
4997
4998 /**
4999  * Get the maximum number of modify header actions.
5000  *
5001  * @param dev
5002  *   Pointer to rte_eth_dev structure.
5003  * @param flags
5004  *   Flags bits to check if root level.
5005  *
5006  * @return
5007  *   Max number of modify header actions device can support.
5008  */
5009 static inline unsigned int
5010 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5011                               uint64_t flags)
5012 {
5013         /*
5014          * There's no way to directly query the max capacity from FW.
5015          * The maximal value on root table should be assumed to be supported.
5016          */
5017         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
5018                 return MLX5_MAX_MODIFY_NUM;
5019         else
5020                 return MLX5_ROOT_TBL_MODIFY_NUM;
5021 }
5022
5023 /**
5024  * Validate the meter action.
5025  *
5026  * @param[in] dev
5027  *   Pointer to rte_eth_dev structure.
5028  * @param[in] action_flags
5029  *   Bit-fields that holds the actions detected until now.
5030  * @param[in] action
5031  *   Pointer to the meter action.
5032  * @param[in] attr
5033  *   Attributes of flow that includes this action.
5034  * @param[in] port_id_item
5035  *   Pointer to item indicating port id.
5036  * @param[out] error
5037  *   Pointer to error structure.
5038  *
5039  * @return
5040  *   0 on success, a negative errno value otherwise and rte_ernno is set.
5041  */
5042 static int
5043 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5044                                 uint64_t action_flags,
5045                                 const struct rte_flow_action *action,
5046                                 const struct rte_flow_attr *attr,
5047                                 const struct rte_flow_item *port_id_item,
5048                                 bool *def_policy,
5049                                 struct rte_flow_error *error)
5050 {
5051         struct mlx5_priv *priv = dev->data->dev_private;
5052         const struct rte_flow_action_meter *am = action->conf;
5053         struct mlx5_flow_meter_info *fm;
5054         struct mlx5_flow_meter_policy *mtr_policy;
5055         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5056
5057         if (!am)
5058                 return rte_flow_error_set(error, EINVAL,
5059                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5060                                           "meter action conf is NULL");
5061
5062         if (action_flags & MLX5_FLOW_ACTION_METER)
5063                 return rte_flow_error_set(error, ENOTSUP,
5064                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5065                                           "meter chaining not support");
5066         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5067                 return rte_flow_error_set(error, ENOTSUP,
5068                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5069                                           "meter with jump not support");
5070         if (!priv->mtr_en)
5071                 return rte_flow_error_set(error, ENOTSUP,
5072                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5073                                           NULL,
5074                                           "meter action not supported");
5075         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5076         if (!fm)
5077                 return rte_flow_error_set(error, EINVAL,
5078                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5079                                           "Meter not found");
5080         /* aso meter can always be shared by different domains */
5081         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5082             !(fm->transfer == attr->transfer ||
5083               (!fm->ingress && !attr->ingress && attr->egress) ||
5084               (!fm->egress && !attr->egress && attr->ingress)))
5085                 return rte_flow_error_set(error, EINVAL,
5086                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5087                         "Flow attributes domain are either invalid "
5088                         "or have a domain conflict with current "
5089                         "meter attributes");
5090         if (fm->def_policy) {
5091                 if (!((attr->transfer &&
5092                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5093                         (attr->egress &&
5094                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5095                         (attr->ingress &&
5096                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5097                         return rte_flow_error_set(error, EINVAL,
5098                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5099                                           "Flow attributes domain "
5100                                           "have a conflict with current "
5101                                           "meter domain attributes");
5102                 *def_policy = true;
5103         } else {
5104                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5105                                                 fm->policy_id, NULL);
5106                 if (!mtr_policy)
5107                         return rte_flow_error_set(error, EINVAL,
5108                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5109                                           "Invalid policy id for meter ");
5110                 if (!((attr->transfer && mtr_policy->transfer) ||
5111                         (attr->egress && mtr_policy->egress) ||
5112                         (attr->ingress && mtr_policy->ingress)))
5113                         return rte_flow_error_set(error, EINVAL,
5114                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5115                                           "Flow attributes domain "
5116                                           "have a conflict with current "
5117                                           "meter domain attributes");
5118                 if (attr->transfer && mtr_policy->dev) {
5119                         /**
5120                          * When policy has fate action of port_id,
5121                          * the flow should have the same src port as policy.
5122                          */
5123                         struct mlx5_priv *policy_port_priv =
5124                                         mtr_policy->dev->data->dev_private;
5125                         int32_t flow_src_port = priv->representor_id;
5126
5127                         if (port_id_item) {
5128                                 const struct rte_flow_item_port_id *spec =
5129                                                         port_id_item->spec;
5130                                 struct mlx5_priv *port_priv =
5131                                         mlx5_port_to_eswitch_info(spec->id,
5132                                                                   false);
5133                                 if (!port_priv)
5134                                         return rte_flow_error_set(error,
5135                                                 rte_errno,
5136                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5137                                                 spec,
5138                                                 "Failed to get port info.");
5139                                 flow_src_port = port_priv->representor_id;
5140                         }
5141                         if (flow_src_port != policy_port_priv->representor_id)
5142                                 return rte_flow_error_set(error,
5143                                                 rte_errno,
5144                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5145                                                 NULL,
5146                                                 "Flow and meter policy "
5147                                                 "have different src port.");
5148                 }
5149                 *def_policy = false;
5150         }
5151         return 0;
5152 }
5153
5154 /**
5155  * Validate the age action.
5156  *
5157  * @param[in] action_flags
5158  *   Holds the actions detected until now.
5159  * @param[in] action
5160  *   Pointer to the age action.
5161  * @param[in] dev
5162  *   Pointer to the Ethernet device structure.
5163  * @param[out] error
5164  *   Pointer to error structure.
5165  *
5166  * @return
5167  *   0 on success, a negative errno value otherwise and rte_errno is set.
5168  */
5169 static int
5170 flow_dv_validate_action_age(uint64_t action_flags,
5171                             const struct rte_flow_action *action,
5172                             struct rte_eth_dev *dev,
5173                             struct rte_flow_error *error)
5174 {
5175         struct mlx5_priv *priv = dev->data->dev_private;
5176         const struct rte_flow_action_age *age = action->conf;
5177
5178         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5179             !priv->sh->aso_age_mng))
5180                 return rte_flow_error_set(error, ENOTSUP,
5181                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5182                                           NULL,
5183                                           "age action not supported");
5184         if (!(action->conf))
5185                 return rte_flow_error_set(error, EINVAL,
5186                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5187                                           "configuration cannot be null");
5188         if (!(age->timeout))
5189                 return rte_flow_error_set(error, EINVAL,
5190                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5191                                           "invalid timeout value 0");
5192         if (action_flags & MLX5_FLOW_ACTION_AGE)
5193                 return rte_flow_error_set(error, EINVAL,
5194                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5195                                           "duplicate age actions set");
5196         return 0;
5197 }
5198
5199 /**
5200  * Validate the modify-header IPv4 DSCP actions.
5201  *
5202  * @param[in] action_flags
5203  *   Holds the actions detected until now.
5204  * @param[in] action
5205  *   Pointer to the modify action.
5206  * @param[in] item_flags
5207  *   Holds the items detected.
5208  * @param[out] error
5209  *   Pointer to error structure.
5210  *
5211  * @return
5212  *   0 on success, a negative errno value otherwise and rte_errno is set.
5213  */
5214 static int
5215 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5216                                          const struct rte_flow_action *action,
5217                                          const uint64_t item_flags,
5218                                          struct rte_flow_error *error)
5219 {
5220         int ret = 0;
5221
5222         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5223         if (!ret) {
5224                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5225                         return rte_flow_error_set(error, EINVAL,
5226                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5227                                                   NULL,
5228                                                   "no ipv4 item in pattern");
5229         }
5230         return ret;
5231 }
5232
5233 /**
5234  * Validate the modify-header IPv6 DSCP actions.
5235  *
5236  * @param[in] action_flags
5237  *   Holds the actions detected until now.
5238  * @param[in] action
5239  *   Pointer to the modify action.
5240  * @param[in] item_flags
5241  *   Holds the items detected.
5242  * @param[out] error
5243  *   Pointer to error structure.
5244  *
5245  * @return
5246  *   0 on success, a negative errno value otherwise and rte_errno is set.
5247  */
5248 static int
5249 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5250                                          const struct rte_flow_action *action,
5251                                          const uint64_t item_flags,
5252                                          struct rte_flow_error *error)
5253 {
5254         int ret = 0;
5255
5256         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5257         if (!ret) {
5258                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5259                         return rte_flow_error_set(error, EINVAL,
5260                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5261                                                   NULL,
5262                                                   "no ipv6 item in pattern");
5263         }
5264         return ret;
5265 }
5266
/**
 * Match modify-header resource.
 *
 * Hash-list match callback: compares the reference resource carried in
 * @p cb_ctx against an existing entry.
 *
 * @param list
 *   Pointer to the hash list.
 * @param entry
 *   Pointer to exist resource entry object.
 * @param key
 *   Key of the new entry (unused, full comparison is done instead).
 * @param cb_ctx
 *   Pointer to the context holding the new modify-header resource.
 *
 * @return
 *   0 on matching, non-zero otherwise.
 */
int
flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
			struct mlx5_hlist_entry *entry,
			uint64_t key __rte_unused, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	struct mlx5_flow_dv_modify_hdr_resource *resource =
			container_of(entry, typeof(*resource), entry);
	/*
	 * Fixed part of the comparison key: from ft_type to the end of the
	 * structure. The actions data is appended below; presumably actions[]
	 * is a trailing flexible array (the create callback allocates
	 * sizeof(*entry) + data_len for it) — confirm against the struct
	 * definition.
	 */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);

	/* Append the variable part: the modify actions themselves. */
	key_len += ref->actions_num * sizeof(ref->actions[0]);
	/*
	 * NOTE(review): memcmp over the struct tail also compares padding
	 * bytes; entries are zero-allocated (MLX5_MEM_ZERO in the create
	 * callback) — verify the reference resource is zero-initialized too.
	 */
	return ref->actions_num != resource->actions_num ||
	       memcmp(&ref->ft_type, &resource->ft_type, key_len);
}
5297
/**
 * Hash-list create callback: allocate a new modify-header resource from
 * the reference one in @p cb_ctx and create its flow action.
 *
 * @param list
 *   Pointer to the hash list; its ctx is the shared device context.
 * @param key
 *   Key of the new entry (unused).
 * @param cb_ctx
 *   Pointer to the context holding the reference modify-header resource
 *   and the error structure to fill on failure.
 *
 * @return
 *   Pointer to the new hash-list entry, or NULL on failure (error set,
 *   any allocated memory released).
 */
struct mlx5_hlist_entry *
flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5dv_dr_domain *ns;
	struct mlx5_flow_dv_modify_hdr_resource *entry;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	int ret;
	/* Size of the trailing modify-actions data. */
	uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
	/* Fixed key part: from ft_type to the end of the structure. */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);

	/* Allocate extra room after the struct for the actions data. */
	entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
			    SOCKET_ID_ANY);
	if (!entry) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	/* Copy fixed key part and actions data from the reference in one go. */
	rte_memcpy(&entry->ft_type,
		   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
		   key_len + data_len);
	/* Pick the DR domain that matches the flow table type. */
	if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_domain;
	else
		ns = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_action_modify_header
					(sh->ctx, ns, entry,
					 data_len, &entry->action);
	if (ret) {
		/* Release the entry; it was never published to the list. */
		mlx5_free(entry);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create modification action");
		return NULL;
	}
	return &entry->entry;
}
5340
5341 /**
5342  * Validate the sample action.
5343  *
5344  * @param[in, out] action_flags
5345  *   Holds the actions detected until now.
5346  * @param[in] action
5347  *   Pointer to the sample action.
5348  * @param[in] dev
5349  *   Pointer to the Ethernet device structure.
5350  * @param[in] attr
5351  *   Attributes of flow that includes this action.
5352  * @param[in] item_flags
5353  *   Holds the items detected.
5354  * @param[in] rss
5355  *   Pointer to the RSS action.
5356  * @param[out] sample_rss
5357  *   Pointer to the RSS action in sample action list.
5358  * @param[out] count
5359  *   Pointer to the COUNT action in sample action list.
5360  * @param[out] fdb_mirror_limit
5361  *   Pointer to the FDB mirror limitation flag.
5362  * @param[out] error
5363  *   Pointer to error structure.
5364  *
5365  * @return
5366  *   0 on success, a negative errno value otherwise and rte_errno is set.
5367  */
5368 static int
5369 flow_dv_validate_action_sample(uint64_t *action_flags,
5370                                const struct rte_flow_action *action,
5371                                struct rte_eth_dev *dev,
5372                                const struct rte_flow_attr *attr,
5373                                uint64_t item_flags,
5374                                const struct rte_flow_action_rss *rss,
5375                                const struct rte_flow_action_rss **sample_rss,
5376                                const struct rte_flow_action_count **count,
5377                                int *fdb_mirror_limit,
5378                                struct rte_flow_error *error)
5379 {
5380         struct mlx5_priv *priv = dev->data->dev_private;
5381         struct mlx5_dev_config *dev_conf = &priv->config;
5382         const struct rte_flow_action_sample *sample = action->conf;
5383         const struct rte_flow_action *act;
5384         uint64_t sub_action_flags = 0;
5385         uint16_t queue_index = 0xFFFF;
5386         int actions_n = 0;
5387         int ret;
5388
5389         if (!sample)
5390                 return rte_flow_error_set(error, EINVAL,
5391                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5392                                           "configuration cannot be NULL");
5393         if (sample->ratio == 0)
5394                 return rte_flow_error_set(error, EINVAL,
5395                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5396                                           "ratio value starts from 1");
5397         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5398                 return rte_flow_error_set(error, ENOTSUP,
5399                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5400                                           NULL,
5401                                           "sample action not supported");
5402         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5403                 return rte_flow_error_set(error, EINVAL,
5404                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5405                                           "Multiple sample actions not "
5406                                           "supported");
5407         if (*action_flags & MLX5_FLOW_ACTION_METER)
5408                 return rte_flow_error_set(error, EINVAL,
5409                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5410                                           "wrong action order, meter should "
5411                                           "be after sample action");
5412         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5413                 return rte_flow_error_set(error, EINVAL,
5414                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5415                                           "wrong action order, jump should "
5416                                           "be after sample action");
5417         act = sample->actions;
5418         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5419                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5420                         return rte_flow_error_set(error, ENOTSUP,
5421                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5422                                                   act, "too many actions");
5423                 switch (act->type) {
5424                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5425                         ret = mlx5_flow_validate_action_queue(act,
5426                                                               sub_action_flags,
5427                                                               dev,
5428                                                               attr, error);
5429                         if (ret < 0)
5430                                 return ret;
5431                         queue_index = ((const struct rte_flow_action_queue *)
5432                                                         (act->conf))->index;
5433                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5434                         ++actions_n;
5435                         break;
5436                 case RTE_FLOW_ACTION_TYPE_RSS:
5437                         *sample_rss = act->conf;
5438                         ret = mlx5_flow_validate_action_rss(act,
5439                                                             sub_action_flags,
5440                                                             dev, attr,
5441                                                             item_flags,
5442                                                             error);
5443                         if (ret < 0)
5444                                 return ret;
5445                         if (rss && *sample_rss &&
5446                             ((*sample_rss)->level != rss->level ||
5447                             (*sample_rss)->types != rss->types))
5448                                 return rte_flow_error_set(error, ENOTSUP,
5449                                         RTE_FLOW_ERROR_TYPE_ACTION,
5450                                         NULL,
5451                                         "Can't use the different RSS types "
5452                                         "or level in the same flow");
5453                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5454                                 queue_index = (*sample_rss)->queue[0];
5455                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5456                         ++actions_n;
5457                         break;
5458                 case RTE_FLOW_ACTION_TYPE_MARK:
5459                         ret = flow_dv_validate_action_mark(dev, act,
5460                                                            sub_action_flags,
5461                                                            attr, error);
5462                         if (ret < 0)
5463                                 return ret;
5464                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5465                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5466                                                 MLX5_FLOW_ACTION_MARK_EXT;
5467                         else
5468                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5469                         ++actions_n;
5470                         break;
5471                 case RTE_FLOW_ACTION_TYPE_COUNT:
5472                         ret = flow_dv_validate_action_count
5473                                 (dev, is_shared_action_count(act),
5474                                  *action_flags | sub_action_flags,
5475                                  error);
5476                         if (ret < 0)
5477                                 return ret;
5478                         *count = act->conf;
5479                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5480                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5481                         ++actions_n;
5482                         break;
5483                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5484                         ret = flow_dv_validate_action_port_id(dev,
5485                                                               sub_action_flags,
5486                                                               act,
5487                                                               attr,
5488                                                               error);
5489                         if (ret)
5490                                 return ret;
5491                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5492                         ++actions_n;
5493                         break;
5494                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5495                         ret = flow_dv_validate_action_raw_encap_decap
5496                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5497                                  &actions_n, action, item_flags, error);
5498                         if (ret < 0)
5499                                 return ret;
5500                         ++actions_n;
5501                         break;
5502                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5503                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5504                         ret = flow_dv_validate_action_l2_encap(dev,
5505                                                                sub_action_flags,
5506                                                                act, attr,
5507                                                                error);
5508                         if (ret < 0)
5509                                 return ret;
5510                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5511                         ++actions_n;
5512                         break;
5513                 default:
5514                         return rte_flow_error_set(error, ENOTSUP,
5515                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5516                                                   NULL,
5517                                                   "Doesn't support optional "
5518                                                   "action");
5519                 }
5520         }
5521         if (attr->ingress && !attr->transfer) {
5522                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5523                                           MLX5_FLOW_ACTION_RSS)))
5524                         return rte_flow_error_set(error, EINVAL,
5525                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5526                                                   NULL,
5527                                                   "Ingress must has a dest "
5528                                                   "QUEUE for Sample");
5529         } else if (attr->egress && !attr->transfer) {
5530                 return rte_flow_error_set(error, ENOTSUP,
5531                                           RTE_FLOW_ERROR_TYPE_ACTION,
5532                                           NULL,
5533                                           "Sample Only support Ingress "
5534                                           "or E-Switch");
5535         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5536                 MLX5_ASSERT(attr->transfer);
5537                 if (sample->ratio > 1)
5538                         return rte_flow_error_set(error, ENOTSUP,
5539                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5540                                                   NULL,
5541                                                   "E-Switch doesn't support "
5542                                                   "any optional action "
5543                                                   "for sampling");
5544                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5545                         return rte_flow_error_set(error, ENOTSUP,
5546                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5547                                                   NULL,
5548                                                   "unsupported action QUEUE");
5549                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5550                         return rte_flow_error_set(error, ENOTSUP,
5551                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5552                                                   NULL,
5553                                                   "unsupported action QUEUE");
5554                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5555                         return rte_flow_error_set(error, EINVAL,
5556                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5557                                                   NULL,
5558                                                   "E-Switch must has a dest "
5559                                                   "port for mirroring");
5560                 if (!priv->config.hca_attr.reg_c_preserve &&
5561                      priv->representor_id != UINT16_MAX)
5562                         *fdb_mirror_limit = 1;
5563         }
5564         /* Continue validation for Xcap actions.*/
5565         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5566             (queue_index == 0xFFFF ||
5567              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5568                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5569                      MLX5_FLOW_XCAP_ACTIONS)
5570                         return rte_flow_error_set(error, ENOTSUP,
5571                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5572                                                   NULL, "encap and decap "
5573                                                   "combination aren't "
5574                                                   "supported");
5575                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5576                                                         MLX5_FLOW_ACTION_ENCAP))
5577                         return rte_flow_error_set(error, ENOTSUP,
5578                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5579                                                   NULL, "encap is not supported"
5580                                                   " for ingress traffic");
5581         }
5582         return 0;
5583 }
5584
5585 /**
5586  * Find existing modify-header resource or create and register a new one.
5587  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
5593  *   Pointer to the dev_flow.
5594  * @param[out] error
5595  *   pointer to error structure.
5596  *
5597  * @return
5598  *   0 on success otherwise -errno and errno is set.
5599  */
5600 static int
5601 flow_dv_modify_hdr_resource_register
5602                         (struct rte_eth_dev *dev,
5603                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5604                          struct mlx5_flow *dev_flow,
5605                          struct rte_flow_error *error)
5606 {
5607         struct mlx5_priv *priv = dev->data->dev_private;
5608         struct mlx5_dev_ctx_shared *sh = priv->sh;
5609         uint32_t key_len = sizeof(*resource) -
5610                            offsetof(typeof(*resource), ft_type) +
5611                            resource->actions_num * sizeof(resource->actions[0]);
5612         struct mlx5_hlist_entry *entry;
5613         struct mlx5_flow_cb_ctx ctx = {
5614                 .error = error,
5615                 .data = resource,
5616         };
5617         uint64_t key64;
5618
5619         resource->flags = dev_flow->dv.group ? 0 :
5620                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5621         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5622                                     resource->flags))
5623                 return rte_flow_error_set(error, EOVERFLOW,
5624                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5625                                           "too many modify header items");
5626         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5627         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5628         if (!entry)
5629                 return -rte_errno;
5630         resource = container_of(entry, typeof(*resource), entry);
5631         dev_flow->handle->dvh.modify_hdr = resource;
5632         return 0;
5633 }
5634
5635 /**
5636  * Get DV flow counter by index.
5637  *
5638  * @param[in] dev
5639  *   Pointer to the Ethernet device structure.
5640  * @param[in] idx
5641  *   mlx5 flow counter index in the container.
5642  * @param[out] ppool
5643  *   mlx5 flow counter pool in the container.
5644  *
5645  * @return
5646  *   Pointer to the counter, NULL otherwise.
5647  */
5648 static struct mlx5_flow_counter *
5649 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5650                            uint32_t idx,
5651                            struct mlx5_flow_counter_pool **ppool)
5652 {
5653         struct mlx5_priv *priv = dev->data->dev_private;
5654         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5655         struct mlx5_flow_counter_pool *pool;
5656
5657         /* Decrease to original index and clear shared bit. */
5658         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5659         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5660         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5661         MLX5_ASSERT(pool);
5662         if (ppool)
5663                 *ppool = pool;
5664         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5665 }
5666
5667 /**
5668  * Check the devx counter belongs to the pool.
5669  *
5670  * @param[in] pool
5671  *   Pointer to the counter pool.
5672  * @param[in] id
5673  *   The counter devx ID.
5674  *
5675  * @return
5676  *   True if counter belongs to the pool, false otherwise.
5677  */
5678 static bool
5679 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5680 {
5681         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5682                    MLX5_COUNTERS_PER_POOL;
5683
5684         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5685                 return true;
5686         return false;
5687 }
5688
5689 /**
5690  * Get a pool by devx counter ID.
5691  *
5692  * @param[in] cmng
5693  *   Pointer to the counter management.
5694  * @param[in] id
5695  *   The counter devx ID.
5696  *
5697  * @return
5698  *   The counter pool pointer if exists, NULL otherwise,
5699  */
5700 static struct mlx5_flow_counter_pool *
5701 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5702 {
5703         uint32_t i;
5704         struct mlx5_flow_counter_pool *pool = NULL;
5705
5706         rte_spinlock_lock(&cmng->pool_update_sl);
5707         /* Check last used pool. */
5708         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5709             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5710                 pool = cmng->pools[cmng->last_pool_idx];
5711                 goto out;
5712         }
5713         /* ID out of range means no suitable pool in the container. */
5714         if (id > cmng->max_id || id < cmng->min_id)
5715                 goto out;
5716         /*
5717          * Find the pool from the end of the container, since mostly counter
5718          * ID is sequence increasing, and the last pool should be the needed
5719          * one.
5720          */
5721         i = cmng->n_valid;
5722         while (i--) {
5723                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5724
5725                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5726                         pool = pool_tmp;
5727                         break;
5728                 }
5729         }
5730 out:
5731         rte_spinlock_unlock(&cmng->pool_update_sl);
5732         return pool;
5733 }
5734
5735 /**
5736  * Resize a counter container.
5737  *
5738  * @param[in] dev
5739  *   Pointer to the Ethernet device structure.
5740  *
5741  * @return
5742  *   0 on success, otherwise negative errno value and rte_errno is set.
5743  */
5744 static int
5745 flow_dv_container_resize(struct rte_eth_dev *dev)
5746 {
5747         struct mlx5_priv *priv = dev->data->dev_private;
5748         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5749         void *old_pools = cmng->pools;
5750         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5751         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5752         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5753
5754         if (!pools) {
5755                 rte_errno = ENOMEM;
5756                 return -ENOMEM;
5757         }
5758         if (old_pools)
5759                 memcpy(pools, old_pools, cmng->n *
5760                                        sizeof(struct mlx5_flow_counter_pool *));
5761         cmng->n = resize;
5762         cmng->pools = pools;
5763         if (old_pools)
5764                 mlx5_free(old_pools);
5765         return 0;
5766 }
5767
5768 /**
5769  * Query a devx flow counter.
5770  *
5771  * @param[in] dev
5772  *   Pointer to the Ethernet device structure.
5773  * @param[in] counter
5774  *   Index to the flow counter.
5775  * @param[out] pkts
5776  *   The statistics value of packets.
5777  * @param[out] bytes
5778  *   The statistics value of bytes.
5779  *
5780  * @return
5781  *   0 on success, otherwise a negative errno value and rte_errno is set.
5782  */
5783 static inline int
5784 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5785                      uint64_t *bytes)
5786 {
5787         struct mlx5_priv *priv = dev->data->dev_private;
5788         struct mlx5_flow_counter_pool *pool = NULL;
5789         struct mlx5_flow_counter *cnt;
5790         int offset;
5791
5792         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5793         MLX5_ASSERT(pool);
5794         if (priv->sh->cmng.counter_fallback)
5795                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5796                                         0, pkts, bytes, 0, NULL, NULL, 0);
5797         rte_spinlock_lock(&pool->sl);
5798         if (!pool->raw) {
5799                 *pkts = 0;
5800                 *bytes = 0;
5801         } else {
5802                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5803                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5804                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5805         }
5806         rte_spinlock_unlock(&pool->sl);
5807         return 0;
5808 }
5809
/**
 * Create and initialize a new counter pool.
 *
 * The new pool is registered into the counter management container under
 * the pool_update_sl lock; the container is resized first if it is full.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dcs
 *   The devX counter handle; stored as the pool's min_dcs.
 * @param[in] age
 *   Whether the pool is for counter that was allocated for aging.
 *
 * @return
 *   The pool container pointer on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/* Counter array (plus age params, if aging) trails the pool struct. */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	/* Two free lists alternate with the asynchronous query generation. */
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/* All slots are used: grow the container; resize may fail. */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		/* Widen the devx ID range used by flow_dv_find_pool_by_id. */
		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
5872
/**
 * Prepare a new counter and/or a new counter pool.
 *
 * In fallback mode a single devx counter is allocated and placed in an
 * existing pool (or a new one). In batch mode a bulk of counters backs a
 * new pool: counter 0 is returned via @p cnt_free and the rest go to the
 * global free list of the matching counter type.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] cnt_free
 *   Where to put the pointer of a new counter.
 * @param[in] age
 *   Whether the pool is for counter that was allocated for aging.
 *
 * @return
 *   The counter pool pointer and @p cnt_free is set on success,
 *   NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
		if (!dcs)
			return NULL;
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		/* The slot inside the pool is fixed by the devx counter ID. */
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* Batch mode: 0x4 is the bulk_bitmap for a bulk allocation. */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/* Stage counters 1..N-1 on a local list, then splice them in bulk. */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Counter 0 of the fresh pool is handed straight to the caller. */
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
5946
/**
 * Allocate a flow counter.
 *
 * Takes a counter from the free list of the requested type, or creates a
 * new pool when the list is empty. The DV counter action is created lazily
 * on first use of a counter slot.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] age
 *   Whether the counter was allocated for aging.
 *
 * @return
 *   Index to flow counter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	/* Counters are backed by devx objects; nothing to do without devx. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get free counters from container. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list empty: a new pool also yields a free counter. */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	if (fallback)
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			/* Batch mode: offset into the pool's bulk object. */
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Fallback: one devx object per counter, offset 0. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	/*
	 * When the count action isn't shared (by ID), shared_info field is
	 * used for indirect action API's refcnt.
	 * When the counter action is not shared neither by ID nor by indirect
	 * action API, shared info must be 1.
	 */
	cnt_free->shared_info.refcnt = 1;
	return cnt_idx;
err:
	/* Put the taken counter back on the free list of its type. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
6033
6034 /**
6035  * Allocate a shared flow counter.
6036  *
6037  * @param[in] ctx
6038  *   Pointer to the shared counter configuration.
6039  * @param[in] data
6040  *   Pointer to save the allocated counter index.
6041  *
6042  * @return
6043  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6044  */
6045
6046 static int32_t
6047 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6048 {
6049         struct mlx5_shared_counter_conf *conf = ctx;
6050         struct rte_eth_dev *dev = conf->dev;
6051         struct mlx5_flow_counter *cnt;
6052
6053         data->dword = flow_dv_counter_alloc(dev, 0);
6054         data->dword |= MLX5_CNT_SHARED_OFFSET;
6055         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6056         cnt->shared_info.id = conf->id;
6057         return 0;
6058 }
6059
6060 /**
6061  * Get a shared flow counter.
6062  *
6063  * @param[in] dev
6064  *   Pointer to the Ethernet device structure.
6065  * @param[in] id
6066  *   Counter identifier.
6067  *
6068  * @return
6069  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6070  */
6071 static uint32_t
6072 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6073 {
6074         struct mlx5_priv *priv = dev->data->dev_private;
6075         struct mlx5_shared_counter_conf conf = {
6076                 .dev = dev,
6077                 .id = id,
6078         };
6079         union mlx5_l3t_data data = {
6080                 .dword = 0,
6081         };
6082
6083         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6084                                flow_dv_counter_alloc_shared_cb, &conf);
6085         return data.dword;
6086 }
6087
6088 /**
6089  * Get age param from counter index.
6090  *
6091  * @param[in] dev
6092  *   Pointer to the Ethernet device structure.
6093  * @param[in] counter
6094  *   Index to the counter handler.
6095  *
6096  * @return
6097  *   The aging parameter specified for the counter index.
6098  */
6099 static struct mlx5_age_param*
6100 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6101                                 uint32_t counter)
6102 {
6103         struct mlx5_flow_counter *cnt;
6104         struct mlx5_flow_counter_pool *pool = NULL;
6105
6106         flow_dv_counter_get_by_idx(dev, counter, &pool);
6107         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6108         cnt = MLX5_POOL_GET_CNT(pool, counter);
6109         return MLX5_CNT_TO_AGE(cnt);
6110 }
6111
/**
 * Remove a flow counter from aged counter list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 * @param[in] cnt
 *   Pointer to the counter handler.
 */
static void
flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
                                uint32_t counter, struct mlx5_flow_counter *cnt)
{
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t expected = AGE_CANDIDATE;

	age_info = GET_PORT_AGE_INFO(priv);
	age_param = flow_dv_counter_idx_get_age(dev, counter);
	/*
	 * Fast path: if the counter is still only an aging candidate,
	 * atomically mark it free and skip any list manipulation. The CAS
	 * fails when the state already changed (presumably the counter aged
	 * out and was queued on the aged list - the TAILQ_REMOVE below
	 * assumes so).
	 */
	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
					 AGE_FREE, false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/**
		 * We need the lock even it is age timeout,
		 * since counter may still be in process.
		 */
		rte_spinlock_lock(&age_info->aged_sl);
		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
		rte_spinlock_unlock(&age_info->aged_sl);
		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
6146
6147 /**
6148  * Release a flow counter.
6149  *
6150  * @param[in] dev
6151  *   Pointer to the Ethernet device structure.
6152  * @param[in] counter
6153  *   Index to the counter handler.
6154  */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type;

	/* Counter index 0 is the "no counter" marker - nothing to release. */
	if (!counter)
		return;
	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	if (pool->is_aged) {
		/* Detach the counter from the aging machinery first. */
		flow_dv_counter_remove_from_age(dev, counter, cnt);
	} else {
		/*
		 * If the counter action is shared by ID, the l3t_clear_entry
		 * function reduces its references counter. If after the
		 * reduction the action is still referenced, the function
		 * returns here and does not release it.
		 */
		if (IS_LEGACY_SHARED_CNT(counter) &&
		    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
					 cnt->shared_info.id))
			return;
		/*
		 * If the counter action is shared by indirect action API,
		 * the atomic function reduces its references counter.
		 * If after the reduction the action is still referenced, the
		 * function returns here and does not release it.
		 * When the counter action is not shared neither by ID nor by
		 * indirect action API, shared info is 1 before the reduction,
		 * so this condition is failed and function doesn't return here.
		 */
		if (!IS_LEGACY_SHARED_CNT(counter) &&
		    __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
				       __ATOMIC_RELAXED))
			return;
	}
	/* Remember the owning pool so the freed counter can be reused. */
	cnt->pool = pool;
	/*
	 * Put the counter back to list to be updated in none fallback mode.
	 * Currently, we are using two list alternately, while one is in query,
	 * add the freed counter to the other list based on the pool query_gen
	 * value. After query finishes, add counter the list to the global
	 * container counter list. The list changes while query starts. In
	 * this case, lock will not be needed as query callback and release
	 * function both operate with the different list.
	 */
	if (!priv->sh->cmng.counter_fallback) {
		rte_spinlock_lock(&pool->csl);
		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
		rte_spinlock_unlock(&pool->csl);
	} else {
		/*
		 * Fallback mode: freed counters go to per-type global lists.
		 * dcs_when_active is carried into dcs_when_free - presumably
		 * so the DevX object is reused on the next allocation; TODO
		 * confirm against the allocation path.
		 */
		cnt->dcs_when_free = cnt->dcs_when_active;
		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
					   MLX5_COUNTER_TYPE_ORIGIN;
		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
				  cnt, next);
		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
	}
}
6218
6219 /**
6220  * Resize a meter id container.
6221  *
6222  * @param[in] dev
6223  *   Pointer to the Ethernet device structure.
6224  *
6225  * @return
6226  *   0 on success, otherwise negative errno value and rte_errno is set.
6227  */
6228 static int
6229 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6230 {
6231         struct mlx5_priv *priv = dev->data->dev_private;
6232         struct mlx5_aso_mtr_pools_mng *pools_mng =
6233                                 &priv->sh->mtrmng->pools_mng;
6234         void *old_pools = pools_mng->pools;
6235         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6236         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6237         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6238
6239         if (!pools) {
6240                 rte_errno = ENOMEM;
6241                 return -ENOMEM;
6242         }
6243         if (!pools_mng->n)
6244                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6245                         mlx5_free(pools);
6246                         return -ENOMEM;
6247                 }
6248         if (old_pools)
6249                 memcpy(pools, old_pools, pools_mng->n *
6250                                        sizeof(struct mlx5_aso_mtr_pool *));
6251         pools_mng->n = resize;
6252         pools_mng->pools = pools;
6253         if (old_pools)
6254                 mlx5_free(old_pools);
6255         return 0;
6256 }
6257
6258 /**
6259  * Prepare a new meter and/or a new meter pool.
6260  *
6261  * @param[in] dev
6262  *   Pointer to the Ethernet device structure.
6263  * @param[out] mtr_free
 *   Where to put the pointer of a new meter.
6265  *
6266  * @return
6267  *   The meter pool pointer and @mtr_free is set on success,
6268  *   NULL otherwise and rte_errno is set.
6269  */
static struct mlx5_aso_mtr_pool *
flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
			     struct mlx5_aso_mtr **mtr_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr_pools_mng *pools_mng =
				&priv->sh->mtrmng->pools_mng;
	struct mlx5_aso_mtr_pool *pool = NULL;
	struct mlx5_devx_obj *dcs = NULL;
	uint32_t i;
	uint32_t log_obj_size;

	/*
	 * ASO object sized to MLX5_ASO_MTRS_PER_POOL / 2 - presumably each
	 * ASO object slot carries two meters; TODO confirm against the PRM.
	 */
	log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
			priv->sh->pdn, log_obj_size);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		/* Roll back the DevX object created above. */
		claim_zero(mlx5_devx_cmd_destroy(dcs));
		return NULL;
	}
	pool->devx_obj = dcs;
	pool->index = pools_mng->n_valid;
	/* All container slots in use: grow the pool pointer array first. */
	if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
		mlx5_free(pool);
		claim_zero(mlx5_devx_cmd_destroy(dcs));
		return NULL;
	}
	pools_mng->pools[pool->index] = pool;
	pools_mng->n_valid++;
	/* Meters 1..N-1 go on the free list; meter 0 goes to the caller. */
	for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
		pool->mtrs[i].offset = i;
		LIST_INSERT_HEAD(&pools_mng->meters,
						&pool->mtrs[i], next);
	}
	pool->mtrs[0].offset = 0;
	*mtr_free = &pool->mtrs[0];
	return pool;
}
6313
6314 /**
6315  * Release a flow meter into pool.
6316  *
6317  * @param[in] dev
6318  *   Pointer to the Ethernet device structure.
6319  * @param[in] mtr_idx
6320  *   Index to aso flow meter.
6321  */
6322 static void
6323 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6324 {
6325         struct mlx5_priv *priv = dev->data->dev_private;
6326         struct mlx5_aso_mtr_pools_mng *pools_mng =
6327                                 &priv->sh->mtrmng->pools_mng;
6328         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6329
6330         MLX5_ASSERT(aso_mtr);
6331         rte_spinlock_lock(&pools_mng->mtrsl);
6332         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6333         aso_mtr->state = ASO_METER_FREE;
6334         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6335         rte_spinlock_unlock(&pools_mng->mtrsl);
6336 }
6337
6338 /**
 * Allocate an ASO flow meter.
6340  *
6341  * @param[in] dev
6342  *   Pointer to the Ethernet device structure.
6343  *
6344  * @return
6345  *   Index to aso flow meter on success, 0 otherwise and rte_errno is set.
6346  */
static uint32_t
flow_dv_mtr_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr *mtr_free = NULL;
	struct mlx5_aso_mtr_pools_mng *pools_mng =
				&priv->sh->mtrmng->pools_mng;
	struct mlx5_aso_mtr_pool *pool;
	uint32_t mtr_idx = 0;

	/* ASO meters are DevX objects; without DevX there is nothing to do. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Allocate the flow meter memory. */
	/* Get free meters from management. */
	rte_spinlock_lock(&pools_mng->mtrsl);
	mtr_free = LIST_FIRST(&pools_mng->meters);
	if (mtr_free)
		LIST_REMOVE(mtr_free, next);
	/* Free list empty: create a new pool, which also yields a meter. */
	if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
		rte_spinlock_unlock(&pools_mng->mtrsl);
		return 0;
	}
	mtr_free->state = ASO_METER_WAIT;
	rte_spinlock_unlock(&pools_mng->mtrsl);
	/* Recover the owning pool from the meter's offset within it. */
	pool = container_of(mtr_free,
			struct mlx5_aso_mtr_pool,
			mtrs[mtr_free->offset]);
	mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
	/* Lazily create the ASO flow action on first use of this meter. */
	if (!mtr_free->fm.meter_action) {
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
		struct rte_flow_error error;
		uint8_t reg_id;

		reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
		mtr_free->fm.meter_action =
			mlx5_glue->dv_create_flow_action_aso
						(priv->sh->rx_domain,
						 pool->devx_obj->obj,
						 mtr_free->offset,
						 (1 << MLX5_FLOW_COLOR_GREEN),
						 reg_id - REG_C_0);
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
		/* No action: creation failed or ASO support is compiled out. */
		if (!mtr_free->fm.meter_action) {
			flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
			return 0;
		}
	}
	return mtr_idx;
}
6398
6399 /**
6400  * Verify the @p attributes will be correctly understood by the NIC and store
6401  * them in the @p flow if everything is correct.
6402  *
6403  * @param[in] dev
6404  *   Pointer to dev struct.
6405  * @param[in] attributes
6406  *   Pointer to flow attributes
6407  * @param[in] external
6408  *   This flow rule is created by request external to PMD.
6409  * @param[out] error
6410  *   Pointer to error structure.
6411  *
6412  * @return
6413  *   - 0 on success and non root table.
6414  *   - 1 on success and root table.
6415  *   - a negative errno value otherwise and rte_errno is set.
6416  */
6417 static int
6418 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6419                             const struct mlx5_flow_tunnel *tunnel,
6420                             const struct rte_flow_attr *attributes,
6421                             const struct flow_grp_info *grp_info,
6422                             struct rte_flow_error *error)
6423 {
6424         struct mlx5_priv *priv = dev->data->dev_private;
6425         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6426         int ret = 0;
6427
6428 #ifndef HAVE_MLX5DV_DR
6429         RTE_SET_USED(tunnel);
6430         RTE_SET_USED(grp_info);
6431         if (attributes->group)
6432                 return rte_flow_error_set(error, ENOTSUP,
6433                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6434                                           NULL,
6435                                           "groups are not supported");
6436 #else
6437         uint32_t table = 0;
6438
6439         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6440                                        grp_info, error);
6441         if (ret)
6442                 return ret;
6443         if (!table)
6444                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6445 #endif
6446         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6447             attributes->priority > lowest_priority)
6448                 return rte_flow_error_set(error, ENOTSUP,
6449                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6450                                           NULL,
6451                                           "priority out of range");
6452         if (attributes->transfer) {
6453                 if (!priv->config.dv_esw_en)
6454                         return rte_flow_error_set
6455                                 (error, ENOTSUP,
6456                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6457                                  "E-Switch dr is not supported");
6458                 if (!(priv->representor || priv->master))
6459                         return rte_flow_error_set
6460                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6461                                  NULL, "E-Switch configuration can only be"
6462                                  " done by a master or a representor device");
6463                 if (attributes->egress)
6464                         return rte_flow_error_set
6465                                 (error, ENOTSUP,
6466                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6467                                  "egress is not supported");
6468         }
6469         if (!(attributes->egress ^ attributes->ingress))
6470                 return rte_flow_error_set(error, ENOTSUP,
6471                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6472                                           "must specify exactly one of "
6473                                           "ingress or egress");
6474         return ret;
6475 }
6476
6477 static uint16_t
6478 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6479                           const struct rte_flow_item *end)
6480 {
6481         const struct rte_flow_item *item = *head;
6482         uint16_t l3_protocol;
6483
6484         for (; item != end; item++) {
6485                 switch (item->type) {
6486                 default:
6487                         break;
6488                 case RTE_FLOW_ITEM_TYPE_IPV4:
6489                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6490                         goto l3_ok;
6491                 case RTE_FLOW_ITEM_TYPE_IPV6:
6492                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6493                         goto l3_ok;
6494                 case RTE_FLOW_ITEM_TYPE_ETH:
6495                         if (item->mask && item->spec) {
6496                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6497                                                             type, item,
6498                                                             l3_protocol);
6499                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6500                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6501                                         goto l3_ok;
6502                         }
6503                         break;
6504                 case RTE_FLOW_ITEM_TYPE_VLAN:
6505                         if (item->mask && item->spec) {
6506                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6507                                                             inner_type, item,
6508                                                             l3_protocol);
6509                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6510                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6511                                         goto l3_ok;
6512                         }
6513                         break;
6514                 }
6515         }
6516         return 0;
6517 l3_ok:
6518         *head = item;
6519         return l3_protocol;
6520 }
6521
6522 static uint8_t
6523 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6524                           const struct rte_flow_item *end)
6525 {
6526         const struct rte_flow_item *item = *head;
6527         uint8_t l4_protocol;
6528
6529         for (; item != end; item++) {
6530                 switch (item->type) {
6531                 default:
6532                         break;
6533                 case RTE_FLOW_ITEM_TYPE_TCP:
6534                         l4_protocol = IPPROTO_TCP;
6535                         goto l4_ok;
6536                 case RTE_FLOW_ITEM_TYPE_UDP:
6537                         l4_protocol = IPPROTO_UDP;
6538                         goto l4_ok;
6539                 case RTE_FLOW_ITEM_TYPE_IPV4:
6540                         if (item->mask && item->spec) {
6541                                 const struct rte_flow_item_ipv4 *mask, *spec;
6542
6543                                 mask = (typeof(mask))item->mask;
6544                                 spec = (typeof(spec))item->spec;
6545                                 l4_protocol = mask->hdr.next_proto_id &
6546                                               spec->hdr.next_proto_id;
6547                                 if (l4_protocol == IPPROTO_TCP ||
6548                                     l4_protocol == IPPROTO_UDP)
6549                                         goto l4_ok;
6550                         }
6551                         break;
6552                 case RTE_FLOW_ITEM_TYPE_IPV6:
6553                         if (item->mask && item->spec) {
6554                                 const struct rte_flow_item_ipv6 *mask, *spec;
6555                                 mask = (typeof(mask))item->mask;
6556                                 spec = (typeof(spec))item->spec;
6557                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6558                                 if (l4_protocol == IPPROTO_TCP ||
6559                                     l4_protocol == IPPROTO_UDP)
6560                                         goto l4_ok;
6561                         }
6562                         break;
6563                 }
6564         }
6565         return 0;
6566 l4_ok:
6567         *head = item;
6568         return l4_protocol;
6569 }
6570
6571 static int
6572 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6573                                 const struct rte_flow_item *rule_items,
6574                                 const struct rte_flow_item *integrity_item,
6575                                 struct rte_flow_error *error)
6576 {
6577         struct mlx5_priv *priv = dev->data->dev_private;
6578         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6579         const struct rte_flow_item_integrity *mask = (typeof(mask))
6580                                                      integrity_item->mask;
6581         const struct rte_flow_item_integrity *spec = (typeof(spec))
6582                                                      integrity_item->spec;
6583         uint32_t protocol;
6584
6585         if (!priv->config.hca_attr.pkt_integrity_match)
6586                 return rte_flow_error_set(error, ENOTSUP,
6587                                           RTE_FLOW_ERROR_TYPE_ITEM,
6588                                           integrity_item,
6589                                           "packet integrity integrity_item not supported");
6590         if (!mask)
6591                 mask = &rte_flow_item_integrity_mask;
6592         if (!mlx5_validate_integrity_item(mask))
6593                 return rte_flow_error_set(error, ENOTSUP,
6594                                           RTE_FLOW_ERROR_TYPE_ITEM,
6595                                           integrity_item,
6596                                           "unsupported integrity filter");
6597         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6598         if (spec->level > 1) {
6599                 if (!tunnel_item)
6600                         return rte_flow_error_set(error, ENOTSUP,
6601                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6602                                                   integrity_item,
6603                                                   "missing tunnel item");
6604                 item = tunnel_item;
6605                 end_item = mlx5_find_end_item(tunnel_item);
6606         } else {
6607                 end_item = tunnel_item ? tunnel_item :
6608                            mlx5_find_end_item(integrity_item);
6609         }
6610         if (mask->l3_ok || mask->ipv4_csum_ok) {
6611                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6612                 if (!protocol)
6613                         return rte_flow_error_set(error, EINVAL,
6614                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6615                                                   integrity_item,
6616                                                   "missing L3 protocol");
6617         }
6618         if (mask->l4_ok || mask->l4_csum_ok) {
6619                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6620                 if (!protocol)
6621                         return rte_flow_error_set(error, EINVAL,
6622                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6623                                                   integrity_item,
6624                                                   "missing L4 protocol");
6625         }
6626         return 0;
6627 }
6628
6629 /**
6630  * Internal validation function. For validating both actions and items.
6631  *
6632  * @param[in] dev
6633  *   Pointer to the rte_eth_dev structure.
6634  * @param[in] attr
6635  *   Pointer to the flow attributes.
6636  * @param[in] items
6637  *   Pointer to the list of items.
6638  * @param[in] actions
6639  *   Pointer to the list of actions.
6640  * @param[in] external
6641  *   This flow rule is created by request external to PMD.
6642  * @param[in] hairpin
6643  *   Number of hairpin TX actions, 0 means classic flow.
6644  * @param[out] error
6645  *   Pointer to the error structure.
6646  *
6647  * @return
6648  *   0 on success, a negative errno value otherwise and rte_errno is set.
6649  */
6650 static int
6651 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6652                  const struct rte_flow_item items[],
6653                  const struct rte_flow_action actions[],
6654                  bool external, int hairpin, struct rte_flow_error *error)
6655 {
6656         int ret;
6657         uint64_t action_flags = 0;
6658         uint64_t item_flags = 0;
6659         uint64_t last_item = 0;
6660         uint8_t next_protocol = 0xff;
6661         uint16_t ether_type = 0;
6662         int actions_n = 0;
6663         uint8_t item_ipv6_proto = 0;
6664         int fdb_mirror_limit = 0;
6665         int modify_after_mirror = 0;
6666         const struct rte_flow_item *geneve_item = NULL;
6667         const struct rte_flow_item *gre_item = NULL;
6668         const struct rte_flow_item *gtp_item = NULL;
6669         const struct rte_flow_action_raw_decap *decap;
6670         const struct rte_flow_action_raw_encap *encap;
6671         const struct rte_flow_action_rss *rss = NULL;
6672         const struct rte_flow_action_rss *sample_rss = NULL;
6673         const struct rte_flow_action_count *sample_count = NULL;
6674         const struct rte_flow_item_tcp nic_tcp_mask = {
6675                 .hdr = {
6676                         .tcp_flags = 0xFF,
6677                         .src_port = RTE_BE16(UINT16_MAX),
6678                         .dst_port = RTE_BE16(UINT16_MAX),
6679                 }
6680         };
6681         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6682                 .hdr = {
6683                         .src_addr =
6684                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6685                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6686                         .dst_addr =
6687                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6688                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6689                         .vtc_flow = RTE_BE32(0xffffffff),
6690                         .proto = 0xff,
6691                         .hop_limits = 0xff,
6692                 },
6693                 .has_frag_ext = 1,
6694         };
6695         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6696                 .hdr = {
6697                         .common = {
6698                                 .u32 =
6699                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6700                                         .type = 0xFF,
6701                                         }).u32),
6702                         },
6703                         .dummy[0] = 0xffffffff,
6704                 },
6705         };
6706         struct mlx5_priv *priv = dev->data->dev_private;
6707         struct mlx5_dev_config *dev_conf = &priv->config;
6708         uint16_t queue_index = 0xFFFF;
6709         const struct rte_flow_item_vlan *vlan_m = NULL;
6710         uint32_t rw_act_num = 0;
6711         uint64_t is_root;
6712         const struct mlx5_flow_tunnel *tunnel;
6713         enum mlx5_tof_rule_type tof_rule_type;
6714         struct flow_grp_info grp_info = {
6715                 .external = !!external,
6716                 .transfer = !!attr->transfer,
6717                 .fdb_def_rule = !!priv->fdb_def_rule,
6718                 .std_tbl_fix = true,
6719         };
6720         const struct rte_eth_hairpin_conf *conf;
6721         const struct rte_flow_item *rule_items = items;
6722         const struct rte_flow_item *port_id_item = NULL;
6723         bool def_policy = false;
6724
6725         if (items == NULL)
6726                 return -1;
6727         tunnel = is_tunnel_offload_active(dev) ?
6728                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6729         if (tunnel) {
6730                 if (priv->representor)
6731                         return rte_flow_error_set
6732                                 (error, ENOTSUP,
6733                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6734                                  NULL, "decap not supported for VF representor");
6735                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6736                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6737                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6738                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6739                                         MLX5_FLOW_ACTION_DECAP;
6740                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6741                                         (dev, attr, tunnel, tof_rule_type);
6742         }
6743         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6744         if (ret < 0)
6745                 return ret;
6746         is_root = (uint64_t)ret;
6747         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6748                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6749                 int type = items->type;
6750
6751                 if (!mlx5_flow_os_item_supported(type))
6752                         return rte_flow_error_set(error, ENOTSUP,
6753                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6754                                                   NULL, "item not supported");
6755                 switch (type) {
6756                 case RTE_FLOW_ITEM_TYPE_VOID:
6757                         break;
6758                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6759                         ret = flow_dv_validate_item_port_id
6760                                         (dev, items, attr, item_flags, error);
6761                         if (ret < 0)
6762                                 return ret;
6763                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6764                         port_id_item = items;
6765                         break;
6766                 case RTE_FLOW_ITEM_TYPE_ETH:
6767                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6768                                                           true, error);
6769                         if (ret < 0)
6770                                 return ret;
6771                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6772                                              MLX5_FLOW_LAYER_OUTER_L2;
6773                         if (items->mask != NULL && items->spec != NULL) {
6774                                 ether_type =
6775                                         ((const struct rte_flow_item_eth *)
6776                                          items->spec)->type;
6777                                 ether_type &=
6778                                         ((const struct rte_flow_item_eth *)
6779                                          items->mask)->type;
6780                                 ether_type = rte_be_to_cpu_16(ether_type);
6781                         } else {
6782                                 ether_type = 0;
6783                         }
6784                         break;
6785                 case RTE_FLOW_ITEM_TYPE_VLAN:
6786                         ret = flow_dv_validate_item_vlan(items, item_flags,
6787                                                          dev, error);
6788                         if (ret < 0)
6789                                 return ret;
6790                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6791                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6792                         if (items->mask != NULL && items->spec != NULL) {
6793                                 ether_type =
6794                                         ((const struct rte_flow_item_vlan *)
6795                                          items->spec)->inner_type;
6796                                 ether_type &=
6797                                         ((const struct rte_flow_item_vlan *)
6798                                          items->mask)->inner_type;
6799                                 ether_type = rte_be_to_cpu_16(ether_type);
6800                         } else {
6801                                 ether_type = 0;
6802                         }
6803                         /* Store outer VLAN mask for of_push_vlan action. */
6804                         if (!tunnel)
6805                                 vlan_m = items->mask;
6806                         break;
6807                 case RTE_FLOW_ITEM_TYPE_IPV4:
6808                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6809                                                   &item_flags, &tunnel);
6810                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6811                                                          last_item, ether_type,
6812                                                          error);
6813                         if (ret < 0)
6814                                 return ret;
6815                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6816                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6817                         if (items->mask != NULL &&
6818                             ((const struct rte_flow_item_ipv4 *)
6819                              items->mask)->hdr.next_proto_id) {
6820                                 next_protocol =
6821                                         ((const struct rte_flow_item_ipv4 *)
6822                                          (items->spec))->hdr.next_proto_id;
6823                                 next_protocol &=
6824                                         ((const struct rte_flow_item_ipv4 *)
6825                                          (items->mask))->hdr.next_proto_id;
6826                         } else {
6827                                 /* Reset for inner layer. */
6828                                 next_protocol = 0xff;
6829                         }
6830                         break;
6831                 case RTE_FLOW_ITEM_TYPE_IPV6:
6832                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6833                                                   &item_flags, &tunnel);
6834                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6835                                                            last_item,
6836                                                            ether_type,
6837                                                            &nic_ipv6_mask,
6838                                                            error);
6839                         if (ret < 0)
6840                                 return ret;
6841                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6842                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6843                         if (items->mask != NULL &&
6844                             ((const struct rte_flow_item_ipv6 *)
6845                              items->mask)->hdr.proto) {
6846                                 item_ipv6_proto =
6847                                         ((const struct rte_flow_item_ipv6 *)
6848                                          items->spec)->hdr.proto;
6849                                 next_protocol =
6850                                         ((const struct rte_flow_item_ipv6 *)
6851                                          items->spec)->hdr.proto;
6852                                 next_protocol &=
6853                                         ((const struct rte_flow_item_ipv6 *)
6854                                          items->mask)->hdr.proto;
6855                         } else {
6856                                 /* Reset for inner layer. */
6857                                 next_protocol = 0xff;
6858                         }
6859                         break;
6860                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6861                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6862                                                                   item_flags,
6863                                                                   error);
6864                         if (ret < 0)
6865                                 return ret;
6866                         last_item = tunnel ?
6867                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6868                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6869                         if (items->mask != NULL &&
6870                             ((const struct rte_flow_item_ipv6_frag_ext *)
6871                              items->mask)->hdr.next_header) {
6872                                 next_protocol =
6873                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6874                                  items->spec)->hdr.next_header;
6875                                 next_protocol &=
6876                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6877                                  items->mask)->hdr.next_header;
6878                         } else {
6879                                 /* Reset for inner layer. */
6880                                 next_protocol = 0xff;
6881                         }
6882                         break;
6883                 case RTE_FLOW_ITEM_TYPE_TCP:
6884                         ret = mlx5_flow_validate_item_tcp
6885                                                 (items, item_flags,
6886                                                  next_protocol,
6887                                                  &nic_tcp_mask,
6888                                                  error);
6889                         if (ret < 0)
6890                                 return ret;
6891                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6892                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6893                         break;
6894                 case RTE_FLOW_ITEM_TYPE_UDP:
6895                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6896                                                           next_protocol,
6897                                                           error);
6898                         if (ret < 0)
6899                                 return ret;
6900                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6901                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6902                         break;
6903                 case RTE_FLOW_ITEM_TYPE_GRE:
6904                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6905                                                           next_protocol, error);
6906                         if (ret < 0)
6907                                 return ret;
6908                         gre_item = items;
6909                         last_item = MLX5_FLOW_LAYER_GRE;
6910                         break;
6911                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6912                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6913                                                             next_protocol,
6914                                                             error);
6915                         if (ret < 0)
6916                                 return ret;
6917                         last_item = MLX5_FLOW_LAYER_NVGRE;
6918                         break;
6919                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6920                         ret = mlx5_flow_validate_item_gre_key
6921                                 (items, item_flags, gre_item, error);
6922                         if (ret < 0)
6923                                 return ret;
6924                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6925                         break;
6926                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6927                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6928                                                             error);
6929                         if (ret < 0)
6930                                 return ret;
6931                         last_item = MLX5_FLOW_LAYER_VXLAN;
6932                         break;
6933                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6934                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6935                                                                 item_flags, dev,
6936                                                                 error);
6937                         if (ret < 0)
6938                                 return ret;
6939                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6940                         break;
6941                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6942                         ret = mlx5_flow_validate_item_geneve(items,
6943                                                              item_flags, dev,
6944                                                              error);
6945                         if (ret < 0)
6946                                 return ret;
6947                         geneve_item = items;
6948                         last_item = MLX5_FLOW_LAYER_GENEVE;
6949                         break;
6950                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6951                         ret = mlx5_flow_validate_item_geneve_opt(items,
6952                                                                  last_item,
6953                                                                  geneve_item,
6954                                                                  dev,
6955                                                                  error);
6956                         if (ret < 0)
6957                                 return ret;
6958                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6959                         break;
6960                 case RTE_FLOW_ITEM_TYPE_MPLS:
6961                         ret = mlx5_flow_validate_item_mpls(dev, items,
6962                                                            item_flags,
6963                                                            last_item, error);
6964                         if (ret < 0)
6965                                 return ret;
6966                         last_item = MLX5_FLOW_LAYER_MPLS;
6967                         break;
6968
6969                 case RTE_FLOW_ITEM_TYPE_MARK:
6970                         ret = flow_dv_validate_item_mark(dev, items, attr,
6971                                                          error);
6972                         if (ret < 0)
6973                                 return ret;
6974                         last_item = MLX5_FLOW_ITEM_MARK;
6975                         break;
6976                 case RTE_FLOW_ITEM_TYPE_META:
6977                         ret = flow_dv_validate_item_meta(dev, items, attr,
6978                                                          error);
6979                         if (ret < 0)
6980                                 return ret;
6981                         last_item = MLX5_FLOW_ITEM_METADATA;
6982                         break;
6983                 case RTE_FLOW_ITEM_TYPE_ICMP:
6984                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6985                                                            next_protocol,
6986                                                            error);
6987                         if (ret < 0)
6988                                 return ret;
6989                         last_item = MLX5_FLOW_LAYER_ICMP;
6990                         break;
6991                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6992                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6993                                                             next_protocol,
6994                                                             error);
6995                         if (ret < 0)
6996                                 return ret;
6997                         item_ipv6_proto = IPPROTO_ICMPV6;
6998                         last_item = MLX5_FLOW_LAYER_ICMP6;
6999                         break;
7000                 case RTE_FLOW_ITEM_TYPE_TAG:
7001                         ret = flow_dv_validate_item_tag(dev, items,
7002                                                         attr, error);
7003                         if (ret < 0)
7004                                 return ret;
7005                         last_item = MLX5_FLOW_ITEM_TAG;
7006                         break;
7007                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7008                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7009                         break;
7010                 case RTE_FLOW_ITEM_TYPE_GTP:
7011                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7012                                                         error);
7013                         if (ret < 0)
7014                                 return ret;
7015                         gtp_item = items;
7016                         last_item = MLX5_FLOW_LAYER_GTP;
7017                         break;
7018                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7019                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7020                                                             gtp_item, attr,
7021                                                             error);
7022                         if (ret < 0)
7023                                 return ret;
7024                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7025                         break;
7026                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7027                         /* Capacity will be checked in the translate stage. */
7028                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7029                                                             last_item,
7030                                                             ether_type,
7031                                                             &nic_ecpri_mask,
7032                                                             error);
7033                         if (ret < 0)
7034                                 return ret;
7035                         last_item = MLX5_FLOW_LAYER_ECPRI;
7036                         break;
7037                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7038                         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
7039                                 return rte_flow_error_set
7040                                         (error, ENOTSUP,
7041                                          RTE_FLOW_ERROR_TYPE_ITEM,
7042                                          NULL, "multiple integrity items not supported");
7043                         ret = flow_dv_validate_item_integrity(dev, rule_items,
7044                                                               items, error);
7045                         if (ret < 0)
7046                                 return ret;
7047                         last_item = MLX5_FLOW_ITEM_INTEGRITY;
7048                         break;
7049                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7050                         ret = flow_dv_validate_item_aso_ct(dev, items,
7051                                                            &item_flags, error);
7052                         if (ret < 0)
7053                                 return ret;
7054                         break;
7055                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7056                         /* tunnel offload item was processed before
7057                          * list it here as a supported type
7058                          */
7059                         break;
7060                 default:
7061                         return rte_flow_error_set(error, ENOTSUP,
7062                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7063                                                   NULL, "item not supported");
7064                 }
7065                 item_flags |= last_item;
7066         }
7067         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7068                 int type = actions->type;
7069                 bool shared_count = false;
7070
7071                 if (!mlx5_flow_os_action_supported(type))
7072                         return rte_flow_error_set(error, ENOTSUP,
7073                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7074                                                   actions,
7075                                                   "action not supported");
7076                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7077                         return rte_flow_error_set(error, ENOTSUP,
7078                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7079                                                   actions, "too many actions");
7080                 if (action_flags &
7081                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7082                         return rte_flow_error_set(error, ENOTSUP,
7083                                 RTE_FLOW_ERROR_TYPE_ACTION,
7084                                 NULL, "meter action with policy "
7085                                 "must be the last action");
7086                 switch (type) {
7087                 case RTE_FLOW_ACTION_TYPE_VOID:
7088                         break;
7089                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7090                         ret = flow_dv_validate_action_port_id(dev,
7091                                                               action_flags,
7092                                                               actions,
7093                                                               attr,
7094                                                               error);
7095                         if (ret)
7096                                 return ret;
7097                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7098                         ++actions_n;
7099                         break;
7100                 case RTE_FLOW_ACTION_TYPE_FLAG:
7101                         ret = flow_dv_validate_action_flag(dev, action_flags,
7102                                                            attr, error);
7103                         if (ret < 0)
7104                                 return ret;
7105                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7106                                 /* Count all modify-header actions as one. */
7107                                 if (!(action_flags &
7108                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7109                                         ++actions_n;
7110                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7111                                                 MLX5_FLOW_ACTION_MARK_EXT;
7112                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7113                                         modify_after_mirror = 1;
7114
7115                         } else {
7116                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7117                                 ++actions_n;
7118                         }
7119                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7120                         break;
7121                 case RTE_FLOW_ACTION_TYPE_MARK:
7122                         ret = flow_dv_validate_action_mark(dev, actions,
7123                                                            action_flags,
7124                                                            attr, error);
7125                         if (ret < 0)
7126                                 return ret;
7127                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7128                                 /* Count all modify-header actions as one. */
7129                                 if (!(action_flags &
7130                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7131                                         ++actions_n;
7132                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7133                                                 MLX5_FLOW_ACTION_MARK_EXT;
7134                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7135                                         modify_after_mirror = 1;
7136                         } else {
7137                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7138                                 ++actions_n;
7139                         }
7140                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7141                         break;
7142                 case RTE_FLOW_ACTION_TYPE_SET_META:
7143                         ret = flow_dv_validate_action_set_meta(dev, actions,
7144                                                                action_flags,
7145                                                                attr, error);
7146                         if (ret < 0)
7147                                 return ret;
7148                         /* Count all modify-header actions as one action. */
7149                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7150                                 ++actions_n;
7151                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7152                                 modify_after_mirror = 1;
7153                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7154                         rw_act_num += MLX5_ACT_NUM_SET_META;
7155                         break;
7156                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7157                         ret = flow_dv_validate_action_set_tag(dev, actions,
7158                                                               action_flags,
7159                                                               attr, error);
7160                         if (ret < 0)
7161                                 return ret;
7162                         /* Count all modify-header actions as one action. */
7163                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7164                                 ++actions_n;
7165                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7166                                 modify_after_mirror = 1;
7167                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7168                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7169                         break;
7170                 case RTE_FLOW_ACTION_TYPE_DROP:
7171                         ret = mlx5_flow_validate_action_drop(action_flags,
7172                                                              attr, error);
7173                         if (ret < 0)
7174                                 return ret;
7175                         action_flags |= MLX5_FLOW_ACTION_DROP;
7176                         ++actions_n;
7177                         break;
7178                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7179                         ret = mlx5_flow_validate_action_queue(actions,
7180                                                               action_flags, dev,
7181                                                               attr, error);
7182                         if (ret < 0)
7183                                 return ret;
7184                         queue_index = ((const struct rte_flow_action_queue *)
7185                                                         (actions->conf))->index;
7186                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7187                         ++actions_n;
7188                         break;
7189                 case RTE_FLOW_ACTION_TYPE_RSS:
7190                         rss = actions->conf;
7191                         ret = mlx5_flow_validate_action_rss(actions,
7192                                                             action_flags, dev,
7193                                                             attr, item_flags,
7194                                                             error);
7195                         if (ret < 0)
7196                                 return ret;
7197                         if (rss && sample_rss &&
7198                             (sample_rss->level != rss->level ||
7199                             sample_rss->types != rss->types))
7200                                 return rte_flow_error_set(error, ENOTSUP,
7201                                         RTE_FLOW_ERROR_TYPE_ACTION,
7202                                         NULL,
7203                                         "Can't use the different RSS types "
7204                                         "or level in the same flow");
7205                         if (rss != NULL && rss->queue_num)
7206                                 queue_index = rss->queue[0];
7207                         action_flags |= MLX5_FLOW_ACTION_RSS;
7208                         ++actions_n;
7209                         break;
7210                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7211                         ret =
7212                         mlx5_flow_validate_action_default_miss(action_flags,
7213                                         attr, error);
7214                         if (ret < 0)
7215                                 return ret;
7216                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7217                         ++actions_n;
7218                         break;
7219                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7220                 case RTE_FLOW_ACTION_TYPE_COUNT:
7221                         shared_count = is_shared_action_count(actions);
7222                         ret = flow_dv_validate_action_count(dev, shared_count,
7223                                                             action_flags,
7224                                                             error);
7225                         if (ret < 0)
7226                                 return ret;
7227                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7228                         ++actions_n;
7229                         break;
7230                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7231                         if (flow_dv_validate_action_pop_vlan(dev,
7232                                                              action_flags,
7233                                                              actions,
7234                                                              item_flags, attr,
7235                                                              error))
7236                                 return -rte_errno;
7237                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7238                                 modify_after_mirror = 1;
7239                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7240                         ++actions_n;
7241                         break;
7242                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7243                         ret = flow_dv_validate_action_push_vlan(dev,
7244                                                                 action_flags,
7245                                                                 vlan_m,
7246                                                                 actions, attr,
7247                                                                 error);
7248                         if (ret < 0)
7249                                 return ret;
7250                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7251                                 modify_after_mirror = 1;
7252                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7253                         ++actions_n;
7254                         break;
7255                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7256                         ret = flow_dv_validate_action_set_vlan_pcp
7257                                                 (action_flags, actions, error);
7258                         if (ret < 0)
7259                                 return ret;
7260                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7261                                 modify_after_mirror = 1;
7262                         /* Count PCP with push_vlan command. */
7263                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7264                         break;
7265                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7266                         ret = flow_dv_validate_action_set_vlan_vid
7267                                                 (item_flags, action_flags,
7268                                                  actions, error);
7269                         if (ret < 0)
7270                                 return ret;
7271                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7272                                 modify_after_mirror = 1;
7273                         /* Count VID with push_vlan command. */
7274                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7275                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7276                         break;
7277                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7278                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7279                         ret = flow_dv_validate_action_l2_encap(dev,
7280                                                                action_flags,
7281                                                                actions, attr,
7282                                                                error);
7283                         if (ret < 0)
7284                                 return ret;
7285                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7286                         ++actions_n;
7287                         break;
7288                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7289                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7290                         ret = flow_dv_validate_action_decap(dev, action_flags,
7291                                                             actions, item_flags,
7292                                                             attr, error);
7293                         if (ret < 0)
7294                                 return ret;
7295                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7296                                 modify_after_mirror = 1;
7297                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7298                         ++actions_n;
7299                         break;
7300                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7301                         ret = flow_dv_validate_action_raw_encap_decap
7302                                 (dev, NULL, actions->conf, attr, &action_flags,
7303                                  &actions_n, actions, item_flags, error);
7304                         if (ret < 0)
7305                                 return ret;
7306                         break;
7307                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7308                         decap = actions->conf;
7309                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7310                                 ;
7311                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7312                                 encap = NULL;
7313                                 actions--;
7314                         } else {
7315                                 encap = actions->conf;
7316                         }
7317                         ret = flow_dv_validate_action_raw_encap_decap
7318                                            (dev,
7319                                             decap ? decap : &empty_decap, encap,
7320                                             attr, &action_flags, &actions_n,
7321                                             actions, item_flags, error);
7322                         if (ret < 0)
7323                                 return ret;
7324                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7325                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7326                                 modify_after_mirror = 1;
7327                         break;
7328                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7329                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7330                         ret = flow_dv_validate_action_modify_mac(action_flags,
7331                                                                  actions,
7332                                                                  item_flags,
7333                                                                  error);
7334                         if (ret < 0)
7335                                 return ret;
7336                         /* Count all modify-header actions as one action. */
7337                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7338                                 ++actions_n;
7339                         action_flags |= actions->type ==
7340                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7341                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7342                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7343                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7344                                 modify_after_mirror = 1;
7345                         /*
7346                          * Even if the source and destination MAC addresses have
7347                          * overlap in the header with 4B alignment, the convert
7348                          * function will handle them separately and 4 SW actions
7349                          * will be created. And 2 actions will be added each
7350                          * time no matter how many bytes of address will be set.
7351                          */
7352                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7353                         break;
7354                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7355                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7356                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7357                                                                   actions,
7358                                                                   item_flags,
7359                                                                   error);
7360                         if (ret < 0)
7361                                 return ret;
7362                         /* Count all modify-header actions as one action. */
7363                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7364                                 ++actions_n;
7365                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7366                                 modify_after_mirror = 1;
7367                         action_flags |= actions->type ==
7368                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7369                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7370                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7371                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7372                         break;
7373                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7374                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7375                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7376                                                                   actions,
7377                                                                   item_flags,
7378                                                                   error);
7379                         if (ret < 0)
7380                                 return ret;
7381                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7382                                 return rte_flow_error_set(error, ENOTSUP,
7383                                         RTE_FLOW_ERROR_TYPE_ACTION,
7384                                         actions,
7385                                         "Can't change header "
7386                                         "with ICMPv6 proto");
7387                         /* Count all modify-header actions as one action. */
7388                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7389                                 ++actions_n;
7390                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7391                                 modify_after_mirror = 1;
7392                         action_flags |= actions->type ==
7393                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7394                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7395                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7396                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7397                         break;
7398                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7399                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7400                         ret = flow_dv_validate_action_modify_tp(action_flags,
7401                                                                 actions,
7402                                                                 item_flags,
7403                                                                 error);
7404                         if (ret < 0)
7405                                 return ret;
7406                         /* Count all modify-header actions as one action. */
7407                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7408                                 ++actions_n;
7409                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7410                                 modify_after_mirror = 1;
7411                         action_flags |= actions->type ==
7412                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7413                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7414                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7415                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7416                         break;
7417                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7418                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7419                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7420                                                                  actions,
7421                                                                  item_flags,
7422                                                                  error);
7423                         if (ret < 0)
7424                                 return ret;
7425                         /* Count all modify-header actions as one action. */
7426                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7427                                 ++actions_n;
7428                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7429                                 modify_after_mirror = 1;
7430                         action_flags |= actions->type ==
7431                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7432                                                 MLX5_FLOW_ACTION_SET_TTL :
7433                                                 MLX5_FLOW_ACTION_DEC_TTL;
7434                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7435                         break;
7436                 case RTE_FLOW_ACTION_TYPE_JUMP:
7437                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7438                                                            action_flags,
7439                                                            attr, external,
7440                                                            error);
7441                         if (ret)
7442                                 return ret;
7443                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7444                             fdb_mirror_limit)
7445                                 return rte_flow_error_set(error, EINVAL,
7446                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7447                                                   NULL,
7448                                                   "sample and jump action combination is not supported");
7449                         ++actions_n;
7450                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7451                         break;
7452                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7453                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7454                         ret = flow_dv_validate_action_modify_tcp_seq
7455                                                                 (action_flags,
7456                                                                  actions,
7457                                                                  item_flags,
7458                                                                  error);
7459                         if (ret < 0)
7460                                 return ret;
7461                         /* Count all modify-header actions as one action. */
7462                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7463                                 ++actions_n;
7464                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7465                                 modify_after_mirror = 1;
7466                         action_flags |= actions->type ==
7467                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7468                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7469                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7470                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7471                         break;
7472                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7473                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7474                         ret = flow_dv_validate_action_modify_tcp_ack
7475                                                                 (action_flags,
7476                                                                  actions,
7477                                                                  item_flags,
7478                                                                  error);
7479                         if (ret < 0)
7480                                 return ret;
7481                         /* Count all modify-header actions as one action. */
7482                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7483                                 ++actions_n;
7484                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7485                                 modify_after_mirror = 1;
7486                         action_flags |= actions->type ==
7487                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7488                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7489                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7490                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7491                         break;
7492                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7493                         break;
7494                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7495                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7496                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7497                         break;
7498                 case RTE_FLOW_ACTION_TYPE_METER:
7499                         ret = mlx5_flow_validate_action_meter(dev,
7500                                                               action_flags,
7501                                                               actions, attr,
7502                                                               port_id_item,
7503                                                               &def_policy,
7504                                                               error);
7505                         if (ret < 0)
7506                                 return ret;
7507                         action_flags |= MLX5_FLOW_ACTION_METER;
7508                         if (!def_policy)
7509                                 action_flags |=
7510                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7511                         ++actions_n;
7512                         /* Meter action will add one more TAG action. */
7513                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7514                         break;
7515                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7516                         if (!attr->transfer && !attr->group)
7517                                 return rte_flow_error_set(error, ENOTSUP,
7518                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7519                                                                            NULL,
7520                           "Shared ASO age action is not supported for group 0");
7521                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7522                                 return rte_flow_error_set
7523                                                   (error, EINVAL,
7524                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7525                                                    NULL,
7526                                                    "duplicate age actions set");
7527                         action_flags |= MLX5_FLOW_ACTION_AGE;
7528                         ++actions_n;
7529                         break;
7530                 case RTE_FLOW_ACTION_TYPE_AGE:
7531                         ret = flow_dv_validate_action_age(action_flags,
7532                                                           actions, dev,
7533                                                           error);
7534                         if (ret < 0)
7535                                 return ret;
7536                         /*
7537                          * Validate the regular AGE action (using counter)
7538                          * mutual exclusion with shared counter actions.
7539                          */
7540                         if (!priv->sh->flow_hit_aso_en) {
7541                                 if (shared_count)
7542                                         return rte_flow_error_set
7543                                                 (error, EINVAL,
7544                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7545                                                 NULL,
7546                                                 "old age and shared count combination is not supported");
7547                                 if (sample_count)
7548                                         return rte_flow_error_set
7549                                                 (error, EINVAL,
7550                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7551                                                 NULL,
7552                                                 "old age action and count must be in the same sub flow");
7553                         }
7554                         action_flags |= MLX5_FLOW_ACTION_AGE;
7555                         ++actions_n;
7556                         break;
7557                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7558                         ret = flow_dv_validate_action_modify_ipv4_dscp
7559                                                          (action_flags,
7560                                                           actions,
7561                                                           item_flags,
7562                                                           error);
7563                         if (ret < 0)
7564                                 return ret;
7565                         /* Count all modify-header actions as one action. */
7566                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7567                                 ++actions_n;
7568                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7569                                 modify_after_mirror = 1;
7570                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7571                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7572                         break;
7573                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7574                         ret = flow_dv_validate_action_modify_ipv6_dscp
7575                                                                 (action_flags,
7576                                                                  actions,
7577                                                                  item_flags,
7578                                                                  error);
7579                         if (ret < 0)
7580                                 return ret;
7581                         /* Count all modify-header actions as one action. */
7582                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7583                                 ++actions_n;
7584                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7585                                 modify_after_mirror = 1;
7586                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7587                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7588                         break;
7589                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7590                         ret = flow_dv_validate_action_sample(&action_flags,
7591                                                              actions, dev,
7592                                                              attr, item_flags,
7593                                                              rss, &sample_rss,
7594                                                              &sample_count,
7595                                                              &fdb_mirror_limit,
7596                                                              error);
7597                         if (ret < 0)
7598                                 return ret;
7599                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7600                         ++actions_n;
7601                         break;
7602                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7603                         ret = flow_dv_validate_action_modify_field(dev,
7604                                                                    action_flags,
7605                                                                    actions,
7606                                                                    attr,
7607                                                                    error);
7608                         if (ret < 0)
7609                                 return ret;
7610                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7611                                 modify_after_mirror = 1;
7612                         /* Count all modify-header actions as one action. */
7613                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7614                                 ++actions_n;
7615                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7616                         rw_act_num += ret;
7617                         break;
7618                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7619                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7620                                                              item_flags, attr,
7621                                                              error);
7622                         if (ret < 0)
7623                                 return ret;
7624                         action_flags |= MLX5_FLOW_ACTION_CT;
7625                         break;
7626                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7627                         /* Tunnel offload action was processed before;
7628                          * list it here as a supported type.
7629                          */
7630                         break;
7631                 default:
7632                         return rte_flow_error_set(error, ENOTSUP,
7633                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7634                                                   actions,
7635                                                   "action not supported");
7636                 }
7637         }
7638         /*
7639          * Validate actions in flow rules
7640          * - Explicit decap action is prohibited by the tunnel offload API.
7641          * - Drop action in tunnel steer rule is prohibited by the API.
7642          * - Application cannot use MARK action because its value can mask
7643          *   tunnel default miss notification.
7644          * - JUMP in tunnel match rule has no support in current PMD
7645          *   implementation.
7646          * - TAG & META are reserved for future uses.
7647          */
7648         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7649                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7650                                             MLX5_FLOW_ACTION_MARK     |
7651                                             MLX5_FLOW_ACTION_SET_TAG  |
7652                                             MLX5_FLOW_ACTION_SET_META |
7653                                             MLX5_FLOW_ACTION_DROP;
7654
7655                 if (action_flags & bad_actions_mask)
7656                         return rte_flow_error_set
7657                                         (error, EINVAL,
7658                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7659                                         "Invalid RTE action in tunnel "
7660                                         "set decap rule");
7661                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7662                         return rte_flow_error_set
7663                                         (error, EINVAL,
7664                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7665                                         "tunnel set decap rule must terminate "
7666                                         "with JUMP");
7667                 if (!attr->ingress)
7668                         return rte_flow_error_set
7669                                         (error, EINVAL,
7670                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7671                                         "tunnel flows for ingress traffic only");
7672         }
7673         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7674                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7675                                             MLX5_FLOW_ACTION_MARK    |
7676                                             MLX5_FLOW_ACTION_SET_TAG |
7677                                             MLX5_FLOW_ACTION_SET_META;
7678
7679                 if (action_flags & bad_actions_mask)
7680                         return rte_flow_error_set
7681                                         (error, EINVAL,
7682                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7683                                         "Invalid RTE action in tunnel "
7684                                         "set match rule");
7685         }
7686         /*
7687          * Validate the drop action mutual exclusion with other actions.
7688          * Drop action is mutually-exclusive with any other action, except for
7689          * Count action.
7690          * Drop action compatibility with tunnel offload was already validated.
              * NOTE(review): the condition below or-s MLX5_FLOW_ACTION_TUNNEL_MATCH
              * with itself -- the second operand was presumably meant to be
              * MLX5_FLOW_ACTION_TUNNEL_SET; confirm against upstream.
7691          */
7692         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7693                             MLX5_FLOW_ACTION_TUNNEL_MATCH));
7694         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7695             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7696                 return rte_flow_error_set(error, EINVAL,
7697                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7698                                           "Drop action is mutually-exclusive "
7699                                           "with any other action, except for "
7700                                           "Count action");
7701         /* Eswitch has few restrictions on using items and actions */
7702         if (attr->transfer) {
7703                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7704                     action_flags & MLX5_FLOW_ACTION_FLAG)
7705                         return rte_flow_error_set(error, ENOTSUP,
7706                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7707                                                   NULL,
7708                                                   "unsupported action FLAG");
7709                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7710                     action_flags & MLX5_FLOW_ACTION_MARK)
7711                         return rte_flow_error_set(error, ENOTSUP,
7712                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7713                                                   NULL,
7714                                                   "unsupported action MARK");
7715                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7716                         return rte_flow_error_set(error, ENOTSUP,
7717                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7718                                                   NULL,
7719                                                   "unsupported action QUEUE");
7720                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7721                         return rte_flow_error_set(error, ENOTSUP,
7722                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7723                                                   NULL,
7724                                                   "unsupported action RSS");
7725                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7726                         return rte_flow_error_set(error, EINVAL,
7727                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7728                                                   actions,
7729                                                   "no fate action is found");
7730         } else {
7731                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7732                         return rte_flow_error_set(error, EINVAL,
7733                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7734                                                   actions,
7735                                                   "no fate action is found");
7736         }
7737         /*
7738          * Continue validation for Xcap and VLAN actions.
7739          * If hairpin is working in explicit TX rule mode, there is no actions
7740          * splitting and the validation of hairpin ingress flow should be the
7741          * same as other standard flows.
7742          */
7743         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7744                              MLX5_FLOW_VLAN_ACTIONS)) &&
7745             (queue_index == 0xFFFF ||
7746              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7747              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7748              conf->tx_explicit != 0))) {
7749                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7750                     MLX5_FLOW_XCAP_ACTIONS)
7751                         return rte_flow_error_set(error, ENOTSUP,
7752                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7753                                                   NULL, "encap and decap "
7754                                                   "combination aren't supported");
7755                 if (!attr->transfer && attr->ingress) {
7756                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7757                                 return rte_flow_error_set
7758                                                 (error, ENOTSUP,
7759                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7760                                                  NULL, "encap is not supported"
7761                                                  " for ingress traffic");
7762                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7763                                 return rte_flow_error_set
7764                                                 (error, ENOTSUP,
7765                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7766                                                  NULL, "push VLAN action not "
7767                                                  "supported for ingress");
7768                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7769                                         MLX5_FLOW_VLAN_ACTIONS)
7770                                 return rte_flow_error_set
7771                                                 (error, ENOTSUP,
7772                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7773                                                  NULL, "no support for "
7774                                                  "multiple VLAN actions");
7775                 }
7776         }
7777         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7778                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7779                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7780                         attr->ingress)
7781                         return rte_flow_error_set
7782                                 (error, ENOTSUP,
7783                                 RTE_FLOW_ERROR_TYPE_ACTION,
7784                                 NULL, "fate action not supported for "
7785                                 "meter with policy");
7786                 if (attr->egress) {
7787                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7788                                 return rte_flow_error_set
7789                                         (error, ENOTSUP,
7790                                         RTE_FLOW_ERROR_TYPE_ACTION,
7791                                         NULL, "modify header action in egress "
7792                                         "cannot be done before meter action");
7793                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7794                                 return rte_flow_error_set
7795                                         (error, ENOTSUP,
7796                                         RTE_FLOW_ERROR_TYPE_ACTION,
7797                                         NULL, "encap action in egress "
7798                                         "cannot be done before meter action");
7799                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7800                                 return rte_flow_error_set
7801                                         (error, ENOTSUP,
7802                                         RTE_FLOW_ERROR_TYPE_ACTION,
7803                                         NULL, "push vlan action in egress "
7804                                         "cannot be done before meter action");
7805                 }
7806         }
7807         /*
7808          * Hairpin flow will add one more TAG action in TX implicit mode.
7809          * In TX explicit mode, there will be no hairpin flow ID.
7810          */
7811         if (hairpin > 0)
7812                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7813         /* extra metadata enabled: one more TAG action will be add. */
7814         if (dev_conf->dv_flow_en &&
7815             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7816             mlx5_flow_ext_mreg_supported(dev))
7817                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7818         if (rw_act_num >
7819                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7820                 return rte_flow_error_set(error, ENOTSUP,
7821                                           RTE_FLOW_ERROR_TYPE_ACTION,
7822                                           NULL, "too many header modify"
7823                                           " actions to support");
7824         }
7825         /* Eswitch egress mirror and modify flow has limitation on CX5 */
7826         if (fdb_mirror_limit && modify_after_mirror)
7827                 return rte_flow_error_set(error, EINVAL,
7828                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7829                                 "sample before modify action is not supported");
7830         return 0;
7831 }
7832
7833 /**
7834  * Internal preparation function. Allocates the DV flow size,
7835  * this size is constant.
7836  *
7837  * @param[in] dev
7838  *   Pointer to the rte_eth_dev structure.
7839  * @param[in] attr
7840  *   Pointer to the flow attributes.
7841  * @param[in] items
7842  *   Pointer to the list of items.
7843  * @param[in] actions
7844  *   Pointer to the list of actions.
7845  * @param[out] error
7846  *   Pointer to the error structure.
7847  *
7848  * @return
7849  *   Pointer to mlx5_flow object on success,
7850  *   otherwise NULL and rte_errno is set.
7851  */
7852 static struct mlx5_flow *
7853 flow_dv_prepare(struct rte_eth_dev *dev,
7854                 const struct rte_flow_attr *attr __rte_unused,
7855                 const struct rte_flow_item items[] __rte_unused,
7856                 const struct rte_flow_action actions[] __rte_unused,
7857                 struct rte_flow_error *error)
7858 {
7859         uint32_t handle_idx = 0;
7860         struct mlx5_flow *dev_flow;
7861         struct mlx5_flow_handle *dev_handle;
7862         struct mlx5_priv *priv = dev->data->dev_private;
7863         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7864
7865         MLX5_ASSERT(wks);
7866         wks->skip_matcher_reg = 0;
7867         /* In case of corrupting the memory. */
7868         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7869                 rte_flow_error_set(error, ENOSPC,
7870                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7871                                    "not free temporary device flow");
7872                 return NULL;
7873         }
7874         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7875                                    &handle_idx);
7876         if (!dev_handle) {
7877                 rte_flow_error_set(error, ENOMEM,
7878                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7879                                    "not enough memory to create flow handle");
7880                 return NULL;
7881         }
7882         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7883         dev_flow = &wks->flows[wks->flow_idx++];
7884         memset(dev_flow, 0, sizeof(*dev_flow));
7885         dev_flow->handle = dev_handle;
7886         dev_flow->handle_idx = handle_idx;
7887         /*
7888          * In some old rdma-core releases, before continuing, a check of the
7889          * length of matching parameter will be done at first. It needs to use
7890          * the length without misc4 param. If the flow has misc4 support, then
7891          * the length needs to be adjusted accordingly. Each param member is
7892          * aligned with a 64B boundary naturally.
7893          */
7894         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7895                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7896         dev_flow->ingress = attr->ingress;
7897         dev_flow->dv.transfer = attr->transfer;
7898         return dev_flow;
7899 }
7900
7901 #ifdef RTE_LIBRTE_MLX5_DEBUG
7902 /**
7903  * Sanity check for match mask and value. Similar to check_valid_spec() in
7904  * kernel driver. If unmasked bit is present in value, it returns failure.
7905  *
7906  * @param match_mask
7907  *   pointer to match mask buffer.
7908  * @param match_value
7909  *   pointer to match value buffer.
7910  *
7911  * @return
7912  *   0 if valid, -EINVAL otherwise.
7913  */
7914 static int
7915 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7916 {
7917         uint8_t *m = match_mask;
7918         uint8_t *v = match_value;
7919         unsigned int i;
7920
7921         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7922                 if (v[i] & ~m[i]) {
7923                         DRV_LOG(ERR,
7924                                 "match_value differs from match_criteria"
7925                                 " %p[%u] != %p[%u]",
7926                                 match_value, i, match_mask, i);
7927                         return -EINVAL;
7928                 }
7929         }
7930         return 0;
7931 }
7932 #endif
7933
7934 /**
7935  * Add match of ip_version.
7936  *
7937  * @param[in] group
7938  *   Flow group.
7939  * @param[in] headers_v
7940  *   Values header pointer.
7941  * @param[in] headers_m
7942  *   Masks header pointer.
7943  * @param[in] ip_version
7944  *   The IP version to set.
7945  */
7946 static inline void
7947 flow_dv_set_match_ip_version(uint32_t group,
7948                              void *headers_v,
7949                              void *headers_m,
7950                              uint8_t ip_version)
7951 {
7952         if (group == 0)
7953                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7954         else
7955                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7956                          ip_version);
7957         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7958         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7959         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7960 }
7961
7962 /**
7963  * Add Ethernet item to matcher and to the value.
7964  *
7965  * @param[in, out] matcher
7966  *   Flow matcher.
7967  * @param[in, out] key
7968  *   Flow matcher value.
7969  * @param[in] item
7970  *   Flow pattern to translate.
7971  * @param[in] inner
7972  *   Item is inner pattern.
7973  */
7974 static void
7975 flow_dv_translate_item_eth(void *matcher, void *key,
7976                            const struct rte_flow_item *item, int inner,
7977                            uint32_t group)
7978 {
7979         const struct rte_flow_item_eth *eth_m = item->mask;
7980         const struct rte_flow_item_eth *eth_v = item->spec;
7981         const struct rte_flow_item_eth nic_mask = {
7982                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7983                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7984                 .type = RTE_BE16(0xffff),
7985                 .has_vlan = 0,
7986         };
7987         void *hdrs_m;
7988         void *hdrs_v;
7989         char *l24_v;
7990         unsigned int i;
7991
7992         if (!eth_v)
7993                 return;
7994         if (!eth_m)
7995                 eth_m = &nic_mask;
7996         if (inner) {
7997                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7998                                          inner_headers);
7999                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8000         } else {
8001                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8002                                          outer_headers);
8003                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8004         }
8005         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8006                &eth_m->dst, sizeof(eth_m->dst));
8007         /* The value must be in the range of the mask. */
8008         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8009         for (i = 0; i < sizeof(eth_m->dst); ++i)
8010                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8011         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8012                &eth_m->src, sizeof(eth_m->src));
8013         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8014         /* The value must be in the range of the mask. */
8015         for (i = 0; i < sizeof(eth_m->dst); ++i)
8016                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8017         /*
8018          * HW supports match on one Ethertype, the Ethertype following the last
8019          * VLAN tag of the packet (see PRM).
8020          * Set match on ethertype only if ETH header is not followed by VLAN.
8021          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8022          * ethertype, and use ip_version field instead.
8023          * eCPRI over Ether layer will use type value 0xAEFE.
8024          */
8025         if (eth_m->type == 0xFFFF) {
8026                 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
8027                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8028                 switch (eth_v->type) {
8029                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8030                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8031                         return;
8032                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8033                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8034                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8035                         return;
8036                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8037                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8038                         return;
8039                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8040                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8041                         return;
8042                 default:
8043                         break;
8044                 }
8045         }
8046         if (eth_m->has_vlan) {
8047                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8048                 if (eth_v->has_vlan) {
8049                         /*
8050                          * Here, when also has_more_vlan field in VLAN item is
8051                          * not set, only single-tagged packets will be matched.
8052                          */
8053                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8054                         return;
8055                 }
8056         }
8057         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8058                  rte_be_to_cpu_16(eth_m->type));
8059         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8060         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8061 }
8062
8063 /**
8064  * Add VLAN item to matcher and to the value.
8065  *
8066  * @param[in, out] dev_flow
8067  *   Flow descriptor.
8068  * @param[in, out] matcher
8069  *   Flow matcher.
8070  * @param[in, out] key
8071  *   Flow matcher value.
8072  * @param[in] item
8073  *   Flow pattern to translate.
8074  * @param[in] inner
8075  *   Item is inner pattern.
8076  */
static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
			    void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	void *hdrs_m;
	void *hdrs_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
		/*
		 * This is workaround, masks are not supported,
		 * and pre-validated.
		 */
		/* Remember the VID for VF VLAN handling later on. */
		if (vlan_v)
			dev_flow->handle->vf_vlan.tag =
					rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
	}
	/*
	 * When VLAN item exists in flow, mark packet as tagged,
	 * even if TCI is not specified.
	 */
	/*
	 * NOTE: the key is checked first - a preceding ETH/VLAN item may
	 * already have selected svlan_tag, which must not be overridden.
	 */
	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
	}
	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &rte_flow_item_vlan_mask;
	/* Masked value must be within the mask; both host order here. */
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	/* TCI layout: prio (>>13) | cfi (>>12) | vid (low 12 bits). */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
	/*
	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
	 * ethertype, and use ip_version field instead.
	 */
	if (vlan_m->inner_type == 0xFFFF) {
		switch (vlan_v->inner_type) {
		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
			/* Another VLAN follows: this one is the S-tag. */
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
			return;
		default:
			break;
		}
	}
	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
		/* Only one vlan_tag bit can be set. */
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
		return;
	}
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
8159
8160 /**
8161  * Add IPV4 item to matcher and to the value.
8162  *
8163  * @param[in, out] matcher
8164  *   Flow matcher.
8165  * @param[in, out] key
8166  *   Flow matcher value.
8167  * @param[in] item
8168  *   Flow pattern to translate.
8169  * @param[in] inner
8170  *   Item is inner pattern.
8171  * @param[in] group
8172  *   The group to insert the rule.
8173  */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
			.time_to_live = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP version is matched even if no spec/mask was given. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	/* Destination address: value must be within the mask. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	/* Source address: same treatment. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS byte splits into ECN (low 2 bits) and DSCP (upper 6 bits). */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv4_m->hdr.time_to_live);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
	/* Fragment matching is reduced to a single is-fragment bit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv4_m->hdr.fragment_offset));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
}
8242
8243 /**
8244  * Add IPV6 item to matcher and to the value.
8245  *
8246  * @param[in, out] matcher
8247  *   Flow matcher.
8248  * @param[in, out] key
8249  *   Flow matcher value.
8250  * @param[in] item
8251  *   Flow pattern to translate.
8252  * @param[in] inner
8253  *   Item is inner pattern.
8254  * @param[in] group
8255  *   The group to insert the rule.
8256  */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP version is matched even if no spec/mask was given. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	/* Destination address: copy mask, value must be within the mask. */
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address: same treatment. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	/* vtc_flow: version(4) | traffic class(8) | flow label(20). */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	/* Flow label lives in the misc parameters, per inner/outer. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
	/* Hop limit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv6_m->hdr.hop_limits);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
	/* Fragment matching comes from the has_frag_ext item flag. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv6_m->has_frag_ext));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
}
8351
8352 /**
8353  * Add IPV6 fragment extension item to matcher and to the value.
8354  *
8355  * @param[in, out] matcher
8356  *   Flow matcher.
8357  * @param[in, out] key
8358  *   Flow matcher value.
8359  * @param[in] item
8360  *   Flow pattern to translate.
8361  * @param[in] inner
8362  *   Item is inner pattern.
8363  */
8364 static void
8365 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8366                                      const struct rte_flow_item *item,
8367                                      int inner)
8368 {
8369         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8370         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8371         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8372                 .hdr = {
8373                         .next_header = 0xff,
8374                         .frag_data = RTE_BE16(0xffff),
8375                 },
8376         };
8377         void *headers_m;
8378         void *headers_v;
8379
8380         if (inner) {
8381                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8382                                          inner_headers);
8383                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8384         } else {
8385                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8386                                          outer_headers);
8387                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8388         }
8389         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8390         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8391         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8392         if (!ipv6_frag_ext_v)
8393                 return;
8394         if (!ipv6_frag_ext_m)
8395                 ipv6_frag_ext_m = &nic_mask;
8396         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8397                  ipv6_frag_ext_m->hdr.next_header);
8398         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8399                  ipv6_frag_ext_v->hdr.next_header &
8400                  ipv6_frag_ext_m->hdr.next_header);
8401 }
8402
8403 /**
8404  * Add TCP item to matcher and to the value.
8405  *
8406  * @param[in, out] matcher
8407  *   Flow matcher.
8408  * @param[in, out] key
8409  *   Flow matcher value.
8410  * @param[in] item
8411  *   Flow pattern to translate.
8412  * @param[in] inner
8413  *   Item is inner pattern.
8414  */
8415 static void
8416 flow_dv_translate_item_tcp(void *matcher, void *key,
8417                            const struct rte_flow_item *item,
8418                            int inner)
8419 {
8420         const struct rte_flow_item_tcp *tcp_m = item->mask;
8421         const struct rte_flow_item_tcp *tcp_v = item->spec;
8422         void *headers_m;
8423         void *headers_v;
8424
8425         if (inner) {
8426                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8427                                          inner_headers);
8428                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8429         } else {
8430                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8431                                          outer_headers);
8432                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8433         }
8434         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8435         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8436         if (!tcp_v)
8437                 return;
8438         if (!tcp_m)
8439                 tcp_m = &rte_flow_item_tcp_mask;
8440         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8441                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8442         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8443                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8444         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8445                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8446         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8447                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8448         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8449                  tcp_m->hdr.tcp_flags);
8450         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8451                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8452 }
8453
8454 /**
8455  * Add UDP item to matcher and to the value.
8456  *
8457  * @param[in, out] matcher
8458  *   Flow matcher.
8459  * @param[in, out] key
8460  *   Flow matcher value.
8461  * @param[in] item
8462  *   Flow pattern to translate.
8463  * @param[in] inner
8464  *   Item is inner pattern.
8465  */
8466 static void
8467 flow_dv_translate_item_udp(void *matcher, void *key,
8468                            const struct rte_flow_item *item,
8469                            int inner)
8470 {
8471         const struct rte_flow_item_udp *udp_m = item->mask;
8472         const struct rte_flow_item_udp *udp_v = item->spec;
8473         void *headers_m;
8474         void *headers_v;
8475
8476         if (inner) {
8477                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8478                                          inner_headers);
8479                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8480         } else {
8481                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8482                                          outer_headers);
8483                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8484         }
8485         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8486         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8487         if (!udp_v)
8488                 return;
8489         if (!udp_m)
8490                 udp_m = &rte_flow_item_udp_mask;
8491         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8492                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8493         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8494                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8495         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8496                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8497         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8498                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8499 }
8500
8501 /**
8502  * Add GRE optional Key item to matcher and to the value.
8503  *
8504  * @param[in, out] matcher
8505  *   Flow matcher.
8506  * @param[in, out] key
8507  *   Flow matcher value.
8508  * @param[in] item
8509  *   Flow pattern to translate.
8510  * @param[in] inner
8511  *   Item is inner pattern.
8512  */
8513 static void
8514 flow_dv_translate_item_gre_key(void *matcher, void *key,
8515                                    const struct rte_flow_item *item)
8516 {
8517         const rte_be32_t *key_m = item->mask;
8518         const rte_be32_t *key_v = item->spec;
8519         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8520         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8521         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8522
8523         /* GRE K bit must be on and should already be validated */
8524         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8525         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8526         if (!key_v)
8527                 return;
8528         if (!key_m)
8529                 key_m = &gre_key_default_mask;
8530         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8531                  rte_be_to_cpu_32(*key_m) >> 8);
8532         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8533                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8534         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8535                  rte_be_to_cpu_32(*key_m) & 0xFF);
8536         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8537                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8538 }
8539
8540 /**
8541  * Add GRE item to matcher and to the value.
8542  *
8543  * @param[in, out] matcher
8544  *   Flow matcher.
8545  * @param[in, out] key
8546  *   Flow matcher value.
8547  * @param[in] item
8548  *   Flow pattern to translate.
8549  * @param[in] inner
8550  *   Item is inner pattern.
8551  */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_gre *gre_m = item->mask;
        const struct rte_flow_item_gre *gre_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        /*
         * Overlay used to extract the C/K/S presence flags from the GRE
         * c_rsvd0_ver field after it is converted to CPU byte order.
         * NOTE(review): the bit-field order assumes the compiler allocates
         * bit-fields LSB-first within the 16-bit unit (as on little-endian
         * Linux ABIs) - confirm before reusing on other toolchains.
         */
        struct {
                union {
                        __extension__
                        struct {
                                uint16_t version:3;
                                uint16_t rsvd0:9;
                                uint16_t s_present:1;
                                uint16_t k_present:1;
                                uint16_t rsvd_bit1:1;
                                uint16_t c_present:1;
                        };
                        uint16_t value;
                };
        } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        /* GRE is identified by IP protocol number, match it exactly. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
        /* No spec means "match any GRE" - protocol match above suffices. */
        if (!gre_v)
                return;
        if (!gre_m)
                gre_m = &rte_flow_item_gre_mask;
        /* Protocol field: matcher gets the mask, key gets value AND mask. */
        MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
                 rte_be_to_cpu_16(gre_m->protocol));
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
                 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
        gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
        gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
        /* Match the C, K and S presence bits individually, value AND mask. */
        MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
                 gre_crks_rsvd0_ver_m.c_present);
        MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
                 gre_crks_rsvd0_ver_v.c_present &
                 gre_crks_rsvd0_ver_m.c_present);
        MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
                 gre_crks_rsvd0_ver_m.k_present);
        MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
                 gre_crks_rsvd0_ver_v.k_present &
                 gre_crks_rsvd0_ver_m.k_present);
        MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
                 gre_crks_rsvd0_ver_m.s_present);
        MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
                 gre_crks_rsvd0_ver_v.s_present &
                 gre_crks_rsvd0_ver_m.s_present);
}
8615
8616 /**
8617  * Add NVGRE item to matcher and to the value.
8618  *
8619  * @param[in, out] matcher
8620  *   Flow matcher.
8621  * @param[in, out] key
8622  *   Flow matcher value.
8623  * @param[in] item
8624  *   Flow pattern to translate.
8625  * @param[in] inner
8626  *   Item is inner pattern.
8627  */
8628 static void
8629 flow_dv_translate_item_nvgre(void *matcher, void *key,
8630                              const struct rte_flow_item *item,
8631                              int inner)
8632 {
8633         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8634         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8635         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8636         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8637         const char *tni_flow_id_m;
8638         const char *tni_flow_id_v;
8639         char *gre_key_m;
8640         char *gre_key_v;
8641         int size;
8642         int i;
8643
8644         /* For NVGRE, GRE header fields must be set with defined values. */
8645         const struct rte_flow_item_gre gre_spec = {
8646                 .c_rsvd0_ver = RTE_BE16(0x2000),
8647                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8648         };
8649         const struct rte_flow_item_gre gre_mask = {
8650                 .c_rsvd0_ver = RTE_BE16(0xB000),
8651                 .protocol = RTE_BE16(UINT16_MAX),
8652         };
8653         const struct rte_flow_item gre_item = {
8654                 .spec = &gre_spec,
8655                 .mask = &gre_mask,
8656                 .last = NULL,
8657         };
8658         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8659         if (!nvgre_v)
8660                 return;
8661         if (!nvgre_m)
8662                 nvgre_m = &rte_flow_item_nvgre_mask;
8663         tni_flow_id_m = (const char *)nvgre_m->tni;
8664         tni_flow_id_v = (const char *)nvgre_v->tni;
8665         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8666         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8667         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8668         memcpy(gre_key_m, tni_flow_id_m, size);
8669         for (i = 0; i < size; ++i)
8670                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8671 }
8672
8673 /**
8674  * Add VXLAN item to matcher and to the value.
8675  *
8676  * @param[in, out] matcher
8677  *   Flow matcher.
8678  * @param[in, out] key
8679  *   Flow matcher value.
8680  * @param[in] item
8681  *   Flow pattern to translate.
8682  * @param[in] inner
8683  *   Item is inner pattern.
8684  */
8685 static void
8686 flow_dv_translate_item_vxlan(void *matcher, void *key,
8687                              const struct rte_flow_item *item,
8688                              int inner)
8689 {
8690         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8691         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8692         void *headers_m;
8693         void *headers_v;
8694         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8695         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8696         char *vni_m;
8697         char *vni_v;
8698         uint16_t dport;
8699         int size;
8700         int i;
8701
8702         if (inner) {
8703                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8704                                          inner_headers);
8705                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8706         } else {
8707                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8708                                          outer_headers);
8709                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8710         }
8711         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8712                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8713         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8714                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8715                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8716         }
8717         if (!vxlan_v)
8718                 return;
8719         if (!vxlan_m)
8720                 vxlan_m = &rte_flow_item_vxlan_mask;
8721         size = sizeof(vxlan_m->vni);
8722         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8723         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8724         memcpy(vni_m, vxlan_m->vni, size);
8725         for (i = 0; i < size; ++i)
8726                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8727 }
8728
8729 /**
8730  * Add VXLAN-GPE item to matcher and to the value.
8731  *
8732  * @param[in, out] matcher
8733  *   Flow matcher.
8734  * @param[in, out] key
8735  *   Flow matcher value.
8736  * @param[in] item
8737  *   Flow pattern to translate.
8738  * @param[in] inner
8739  *   Item is inner pattern.
8740  */
8741
8742 static void
8743 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8744                                  const struct rte_flow_item *item, int inner)
8745 {
8746         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8747         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8748         void *headers_m;
8749         void *headers_v;
8750         void *misc_m =
8751                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8752         void *misc_v =
8753                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8754         char *vni_m;
8755         char *vni_v;
8756         uint16_t dport;
8757         int size;
8758         int i;
8759         uint8_t flags_m = 0xff;
8760         uint8_t flags_v = 0xc;
8761
8762         if (inner) {
8763                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8764                                          inner_headers);
8765                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8766         } else {
8767                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8768                                          outer_headers);
8769                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8770         }
8771         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8772                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8773         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8774                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8775                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8776         }
8777         if (!vxlan_v)
8778                 return;
8779         if (!vxlan_m)
8780                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8781         size = sizeof(vxlan_m->vni);
8782         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8783         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8784         memcpy(vni_m, vxlan_m->vni, size);
8785         for (i = 0; i < size; ++i)
8786                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8787         if (vxlan_m->flags) {
8788                 flags_m = vxlan_m->flags;
8789                 flags_v = vxlan_v->flags;
8790         }
8791         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8792         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8793         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8794                  vxlan_m->protocol);
8795         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8796                  vxlan_v->protocol);
8797 }
8798
8799 /**
8800  * Add Geneve item to matcher and to the value.
8801  *
8802  * @param[in, out] matcher
8803  *   Flow matcher.
8804  * @param[in, out] key
8805  *   Flow matcher value.
8806  * @param[in] item
8807  *   Flow pattern to translate.
8808  * @param[in] inner
8809  *   Item is inner pattern.
8810  */
8811
8812 static void
8813 flow_dv_translate_item_geneve(void *matcher, void *key,
8814                               const struct rte_flow_item *item, int inner)
8815 {
8816         const struct rte_flow_item_geneve *geneve_m = item->mask;
8817         const struct rte_flow_item_geneve *geneve_v = item->spec;
8818         void *headers_m;
8819         void *headers_v;
8820         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8821         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8822         uint16_t dport;
8823         uint16_t gbhdr_m;
8824         uint16_t gbhdr_v;
8825         char *vni_m;
8826         char *vni_v;
8827         size_t size, i;
8828
8829         if (inner) {
8830                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8831                                          inner_headers);
8832                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8833         } else {
8834                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8835                                          outer_headers);
8836                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8837         }
8838         dport = MLX5_UDP_PORT_GENEVE;
8839         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8840                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8841                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8842         }
8843         if (!geneve_v)
8844                 return;
8845         if (!geneve_m)
8846                 geneve_m = &rte_flow_item_geneve_mask;
8847         size = sizeof(geneve_m->vni);
8848         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8849         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8850         memcpy(vni_m, geneve_m->vni, size);
8851         for (i = 0; i < size; ++i)
8852                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8853         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8854                  rte_be_to_cpu_16(geneve_m->protocol));
8855         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8856                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8857         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8858         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8859         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8860                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8861         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8862                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8863         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8864                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8865         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8866                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8867                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8868 }
8869
8870 /**
8871  * Create Geneve TLV option resource.
8872  *
8873  * @param dev[in, out]
8874  *   Pointer to rte_eth_dev structure.
8875  * @param[in, out] tag_be24
8876  *   Tag value in big endian then R-shift 8.
8877  * @parm[in, out] dev_flow
8878  *   Pointer to the dev_flow.
8879  * @param[out] error
8880  *   pointer to error structure.
8881  *
8882  * @return
8883  *   0 on success otherwise -errno and errno is set.
8884  */
8885
int
flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
                                             const struct rte_flow_item *item,
                                             struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        /* A single TLV option resource is shared by all ports of the ctx. */
        struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
                        sh->geneve_tlv_option_resource;
        struct mlx5_devx_obj *obj;
        const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
        int ret = 0;

        if (!geneve_opt_v)
                return -1;
        /* Serialize creation/reuse against concurrent flow creation. */
        rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
        if (geneve_opt_resource != NULL) {
                if (geneve_opt_resource->option_class ==
                        geneve_opt_v->option_class &&
                        geneve_opt_resource->option_type ==
                        geneve_opt_v->option_type &&
                        geneve_opt_resource->length ==
                        geneve_opt_v->option_len) {
                        /* We already have GENEVE TLV option obj allocated. */
                        __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
                                           __ATOMIC_RELAXED);
                } else {
                        /* A different option is registered already - reject. */
                        ret = rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Only one GENEVE TLV option supported");
                        goto exit;
                }
        } else {
                /* Create a GENEVE TLV object and resource. */
                obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
                                geneve_opt_v->option_class,
                                geneve_opt_v->option_type,
                                geneve_opt_v->option_len);
                if (!obj) {
                        ret = rte_flow_error_set(error, ENODATA,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to create GENEVE TLV Devx object");
                        goto exit;
                }
                sh->geneve_tlv_option_resource =
                                mlx5_malloc(MLX5_MEM_ZERO,
                                                sizeof(*geneve_opt_resource),
                                                0, SOCKET_ID_ANY);
                if (!sh->geneve_tlv_option_resource) {
                        /* Roll back the Devx object on allocation failure. */
                        claim_zero(mlx5_devx_cmd_destroy(obj));
                        ret = rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "GENEVE TLV object memory allocation failed");
                        goto exit;
                }
                geneve_opt_resource = sh->geneve_tlv_option_resource;
                geneve_opt_resource->obj = obj;
                geneve_opt_resource->option_class = geneve_opt_v->option_class;
                geneve_opt_resource->option_type = geneve_opt_v->option_type;
                geneve_opt_resource->length = geneve_opt_v->option_len;
                __atomic_store_n(&geneve_opt_resource->refcnt, 1,
                                __ATOMIC_RELAXED);
        }
exit:
        rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
        return ret;
}
8953
8954 /**
8955  * Add Geneve TLV option item to matcher.
8956  *
8957  * @param[in, out] dev
8958  *   Pointer to rte_eth_dev structure.
8959  * @param[in, out] matcher
8960  *   Flow matcher.
8961  * @param[in, out] key
8962  *   Flow matcher value.
8963  * @param[in] item
8964  *   Flow pattern to translate.
8965  * @param[out] error
8966  *   Pointer to error structure.
8967  */
8968 static int
8969 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8970                                   void *key, const struct rte_flow_item *item,
8971                                   struct rte_flow_error *error)
8972 {
8973         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8974         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8975         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8976         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8977         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8978                         misc_parameters_3);
8979         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8980         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8981         int ret = 0;
8982
8983         if (!geneve_opt_v)
8984                 return -1;
8985         if (!geneve_opt_m)
8986                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8987         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8988                                                            error);
8989         if (ret) {
8990                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8991                 return ret;
8992         }
8993         /*
8994          * Set the option length in GENEVE header if not requested.
8995          * The GENEVE TLV option length is expressed by the option length field
8996          * in the GENEVE header.
8997          * If the option length was not requested but the GENEVE TLV option item
8998          * is present we set the option length field implicitly.
8999          */
9000         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9001                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9002                          MLX5_GENEVE_OPTLEN_MASK);
9003                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9004                          geneve_opt_v->option_len + 1);
9005         }
9006         /* Set the data. */
9007         if (geneve_opt_v->data) {
9008                 memcpy(&opt_data_key, geneve_opt_v->data,
9009                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9010                                 sizeof(opt_data_key)));
9011                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9012                                 sizeof(opt_data_key));
9013                 memcpy(&opt_data_mask, geneve_opt_m->data,
9014                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9015                                 sizeof(opt_data_mask)));
9016                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9017                                 sizeof(opt_data_mask));
9018                 MLX5_SET(fte_match_set_misc3, misc3_m,
9019                                 geneve_tlv_option_0_data,
9020                                 rte_be_to_cpu_32(opt_data_mask));
9021                 MLX5_SET(fte_match_set_misc3, misc3_v,
9022                                 geneve_tlv_option_0_data,
9023                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9024         }
9025         return ret;
9026 }
9027
9028 /**
9029  * Add MPLS item to matcher and to the value.
9030  *
9031  * @param[in, out] matcher
9032  *   Flow matcher.
9033  * @param[in, out] key
9034  *   Flow matcher value.
9035  * @param[in] item
9036  *   Flow pattern to translate.
9037  * @param[in] prev_layer
9038  *   The protocol layer indicated in previous item.
9039  * @param[in] inner
9040  *   Item is inner pattern.
9041  */
9042 static void
9043 flow_dv_translate_item_mpls(void *matcher, void *key,
9044                             const struct rte_flow_item *item,
9045                             uint64_t prev_layer,
9046                             int inner)
9047 {
9048         const uint32_t *in_mpls_m = item->mask;
9049         const uint32_t *in_mpls_v = item->spec;
9050         uint32_t *out_mpls_m = 0;
9051         uint32_t *out_mpls_v = 0;
9052         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9053         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9054         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9055                                      misc_parameters_2);
9056         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9057         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9058         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9059
9060         switch (prev_layer) {
9061         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9062                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9063                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9064                          MLX5_UDP_PORT_MPLS);
9065                 break;
9066         case MLX5_FLOW_LAYER_GRE:
9067                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9068                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9069                          RTE_ETHER_TYPE_MPLS);
9070                 break;
9071         default:
9072                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
9073                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
9074                          IPPROTO_MPLS);
9075                 break;
9076         }
9077         if (!in_mpls_v)
9078                 return;
9079         if (!in_mpls_m)
9080                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9081         switch (prev_layer) {
9082         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9083                 out_mpls_m =
9084                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9085                                                  outer_first_mpls_over_udp);
9086                 out_mpls_v =
9087                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9088                                                  outer_first_mpls_over_udp);
9089                 break;
9090         case MLX5_FLOW_LAYER_GRE:
9091                 out_mpls_m =
9092                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9093                                                  outer_first_mpls_over_gre);
9094                 out_mpls_v =
9095                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9096                                                  outer_first_mpls_over_gre);
9097                 break;
9098         default:
9099                 /* Inner MPLS not over GRE is not supported. */
9100                 if (!inner) {
9101                         out_mpls_m =
9102                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9103                                                          misc2_m,
9104                                                          outer_first_mpls);
9105                         out_mpls_v =
9106                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9107                                                          misc2_v,
9108                                                          outer_first_mpls);
9109                 }
9110                 break;
9111         }
9112         if (out_mpls_m && out_mpls_v) {
9113                 *out_mpls_m = *in_mpls_m;
9114                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9115         }
9116 }
9117
9118 /**
9119  * Add metadata register item to matcher
9120  *
9121  * @param[in, out] matcher
9122  *   Flow matcher.
9123  * @param[in, out] key
9124  *   Flow matcher value.
9125  * @param[in] reg_type
9126  *   Type of device metadata register
9127  * @param[in] value
9128  *   Register value
9129  * @param[in] mask
9130  *   Register mask
9131  */
static void
flow_dv_match_meta_reg(void *matcher, void *key,
                       enum modify_reg reg_type,
                       uint32_t data, uint32_t mask)
{
        void *misc2_m =
                MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
        void *misc2_v =
                MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
        uint32_t temp;

        /* The key may only carry bits covered by the mask. */
        data &= mask;
        switch (reg_type) {
        case REG_A:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
                break;
        case REG_B:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
                break;
        case REG_C_0:
                /*
                 * The metadata register C0 field might be divided into
                 * source vport index and META item value, we should set
                 * this field according to specified mask, not as whole one.
                 * Merge the new bits into whatever is already programmed.
                 */
                temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
                temp |= mask;
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
                temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
                temp &= ~mask;
                temp |= data;
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
                break;
        case REG_C_1:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
                break;
        case REG_C_2:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
                break;
        case REG_C_3:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
                break;
        case REG_C_4:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
                break;
        case REG_C_5:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
                break;
        case REG_C_6:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
                break;
        case REG_C_7:
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
                break;
        default:
                /* Unknown register - must have been validated earlier. */
                MLX5_ASSERT(false);
                break;
        }
}
9200
9201 /**
9202  * Add MARK item to matcher
9203  *
9204  * @param[in] dev
9205  *   The device to configure through.
9206  * @param[in, out] matcher
9207  *   Flow matcher.
9208  * @param[in, out] key
9209  *   Flow matcher value.
9210  * @param[in] item
9211  *   Flow pattern to translate.
9212  */
9213 static void
9214 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9215                             void *matcher, void *key,
9216                             const struct rte_flow_item *item)
9217 {
9218         struct mlx5_priv *priv = dev->data->dev_private;
9219         const struct rte_flow_item_mark *mark;
9220         uint32_t value;
9221         uint32_t mask;
9222
9223         mark = item->mask ? (const void *)item->mask :
9224                             &rte_flow_item_mark_mask;
9225         mask = mark->id & priv->sh->dv_mark_mask;
9226         mark = (const void *)item->spec;
9227         MLX5_ASSERT(mark);
9228         value = mark->id & priv->sh->dv_mark_mask & mask;
9229         if (mask) {
9230                 enum modify_reg reg;
9231
9232                 /* Get the metadata register index for the mark. */
9233                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9234                 MLX5_ASSERT(reg > 0);
9235                 if (reg == REG_C_0) {
9236                         struct mlx5_priv *priv = dev->data->dev_private;
9237                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9238                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9239
9240                         mask &= msk_c0;
9241                         mask <<= shl_c0;
9242                         value <<= shl_c0;
9243                 }
9244                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9245         }
9246 }
9247
9248 /**
9249  * Add META item to matcher
9250  *
9251  * @param[in] dev
 *   The device to configure through.
9253  * @param[in, out] matcher
9254  *   Flow matcher.
9255  * @param[in, out] key
9256  *   Flow matcher value.
9257  * @param[in] attr
9258  *   Attributes of flow that includes this item.
9259  * @param[in] item
9260  *   Flow pattern to translate.
9261  */
9262 static void
9263 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9264                             void *matcher, void *key,
9265                             const struct rte_flow_attr *attr,
9266                             const struct rte_flow_item *item)
9267 {
9268         const struct rte_flow_item_meta *meta_m;
9269         const struct rte_flow_item_meta *meta_v;
9270
9271         meta_m = (const void *)item->mask;
9272         if (!meta_m)
9273                 meta_m = &rte_flow_item_meta_mask;
9274         meta_v = (const void *)item->spec;
9275         if (meta_v) {
9276                 int reg;
9277                 uint32_t value = meta_v->data;
9278                 uint32_t mask = meta_m->data;
9279
9280                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9281                 if (reg < 0)
9282                         return;
9283                 MLX5_ASSERT(reg != REG_NON);
9284                 if (reg == REG_C_0) {
9285                         struct mlx5_priv *priv = dev->data->dev_private;
9286                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9287                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9288
9289                         mask &= msk_c0;
9290                         mask <<= shl_c0;
9291                         value <<= shl_c0;
9292                 }
9293                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9294         }
9295 }
9296
9297 /**
9298  * Add vport metadata Reg C0 item to matcher
9299  *
9300  * @param[in, out] matcher
9301  *   Flow matcher.
9302  * @param[in, out] key
9303  *   Flow matcher value.
 * @param[in] value
 *   Vport metadata register C0 value to match.
 * @param[in] mask
 *   Mask for the value.
9306  */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
{
	/* The vport metadata is always carried in register C0. */
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}
9313
9314 /**
9315  * Add tag item to matcher
9316  *
9317  * @param[in] dev
 *   The device to configure through.
9319  * @param[in, out] matcher
9320  *   Flow matcher.
9321  * @param[in, out] key
9322  *   Flow matcher value.
9323  * @param[in] item
9324  *   Flow pattern to translate.
9325  */
9326 static void
9327 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9328                                 void *matcher, void *key,
9329                                 const struct rte_flow_item *item)
9330 {
9331         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9332         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9333         uint32_t mask, value;
9334
9335         MLX5_ASSERT(tag_v);
9336         value = tag_v->data;
9337         mask = tag_m ? tag_m->data : UINT32_MAX;
9338         if (tag_v->id == REG_C_0) {
9339                 struct mlx5_priv *priv = dev->data->dev_private;
9340                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9341                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9342
9343                 mask &= msk_c0;
9344                 mask <<= shl_c0;
9345                 value <<= shl_c0;
9346         }
9347         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9348 }
9349
9350 /**
9351  * Add TAG item to matcher
9352  *
9353  * @param[in] dev
 *   The device to configure through.
9355  * @param[in, out] matcher
9356  *   Flow matcher.
9357  * @param[in, out] key
9358  *   Flow matcher value.
9359  * @param[in] item
9360  *   Flow pattern to translate.
9361  */
9362 static void
9363 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9364                            void *matcher, void *key,
9365                            const struct rte_flow_item *item)
9366 {
9367         const struct rte_flow_item_tag *tag_v = item->spec;
9368         const struct rte_flow_item_tag *tag_m = item->mask;
9369         enum modify_reg reg;
9370
9371         MLX5_ASSERT(tag_v);
9372         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9373         /* Get the metadata register index for the tag. */
9374         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9375         MLX5_ASSERT(reg > 0);
9376         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9377 }
9378
9379 /**
9380  * Add source vport match to the specified matcher.
9381  *
9382  * @param[in, out] matcher
9383  *   Flow matcher.
9384  * @param[in, out] key
9385  *   Flow matcher value.
9386  * @param[in] port
9387  *   Source vport value to match
9388  * @param[in] mask
9389  *   Mask
9390  */
static void
flow_dv_translate_item_source_vport(void *matcher, void *key,
				    int16_t port, uint16_t mask)
{
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	/* Program the source vport number into the misc parameters. */
	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
}
9401
9402 /**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] attr
 *   Flow attributes.
9415  *
9416  * @return
9417  *   0 on success, a negative errno value otherwise.
9418  */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item,
			       const struct rte_flow_attr *attr)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;
	uint16_t mask, id;

	/* No item means matching on this device's own port id. */
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
	/*
	 * Translate to vport field or to metadata, depending on mode.
	 * Kernel can use either misc.source_port or half of C0 metadata
	 * register.
	 */
	if (priv->vport_meta_mask) {
		/*
		 * Provide the hint for SW steering library
		 * to insert the flow into ingress domain and
		 * save the extra vport match.
		 */
		if (mask == 0xffff && priv->vport_id == 0xffff &&
		    priv->pf_bond < 0 && attr->transfer)
			flow_dv_translate_item_source_vport
				(matcher, key, priv->vport_id, mask);
		/*
		 * We should always set the vport metadata register,
		 * otherwise the SW steering library can drop
		 * the rule if wire vport metadata value is not zero,
		 * it depends on kernel configuration.
		 */
		flow_dv_translate_item_meta_vport(matcher, key,
						  priv->vport_meta_tag,
						  priv->vport_meta_mask);
	} else {
		flow_dv_translate_item_source_vport(matcher, key,
						    priv->vport_id, mask);
	}
	return 0;
}
9464
9465 /**
9466  * Add ICMP6 item to matcher and to the value.
9467  *
9468  * @param[in, out] matcher
9469  *   Flow matcher.
9470  * @param[in, out] key
9471  *   Flow matcher value.
9472  * @param[in] item
9473  *   Flow pattern to translate.
9474  * @param[in] inner
9475  *   Item is inner pattern.
9476  */
9477 static void
9478 flow_dv_translate_item_icmp6(void *matcher, void *key,
9479                               const struct rte_flow_item *item,
9480                               int inner)
9481 {
9482         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9483         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9484         void *headers_m;
9485         void *headers_v;
9486         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9487                                      misc_parameters_3);
9488         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9489         if (inner) {
9490                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9491                                          inner_headers);
9492                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9493         } else {
9494                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9495                                          outer_headers);
9496                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9497         }
9498         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9499         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9500         if (!icmp6_v)
9501                 return;
9502         if (!icmp6_m)
9503                 icmp6_m = &rte_flow_item_icmp6_mask;
9504         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9505         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9506                  icmp6_v->type & icmp6_m->type);
9507         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9508         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9509                  icmp6_v->code & icmp6_m->code);
9510 }
9511
9512 /**
9513  * Add ICMP item to matcher and to the value.
9514  *
9515  * @param[in, out] matcher
9516  *   Flow matcher.
9517  * @param[in, out] key
9518  *   Flow matcher value.
9519  * @param[in] item
9520  *   Flow pattern to translate.
9521  * @param[in] inner
9522  *   Item is inner pattern.
9523  */
9524 static void
9525 flow_dv_translate_item_icmp(void *matcher, void *key,
9526                             const struct rte_flow_item *item,
9527                             int inner)
9528 {
9529         const struct rte_flow_item_icmp *icmp_m = item->mask;
9530         const struct rte_flow_item_icmp *icmp_v = item->spec;
9531         uint32_t icmp_header_data_m = 0;
9532         uint32_t icmp_header_data_v = 0;
9533         void *headers_m;
9534         void *headers_v;
9535         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9536                                      misc_parameters_3);
9537         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9538         if (inner) {
9539                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9540                                          inner_headers);
9541                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9542         } else {
9543                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9544                                          outer_headers);
9545                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9546         }
9547         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9548         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9549         if (!icmp_v)
9550                 return;
9551         if (!icmp_m)
9552                 icmp_m = &rte_flow_item_icmp_mask;
9553         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9554                  icmp_m->hdr.icmp_type);
9555         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9556                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9557         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9558                  icmp_m->hdr.icmp_code);
9559         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9560                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9561         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9562         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9563         if (icmp_header_data_m) {
9564                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9565                 icmp_header_data_v |=
9566                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9567                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9568                          icmp_header_data_m);
9569                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9570                          icmp_header_data_v & icmp_header_data_m);
9571         }
9572 }
9573
9574 /**
9575  * Add GTP item to matcher and to the value.
9576  *
9577  * @param[in, out] matcher
9578  *   Flow matcher.
9579  * @param[in, out] key
9580  *   Flow matcher value.
9581  * @param[in] item
9582  *   Flow pattern to translate.
9583  * @param[in] inner
9584  *   Item is inner pattern.
9585  */
9586 static void
9587 flow_dv_translate_item_gtp(void *matcher, void *key,
9588                            const struct rte_flow_item *item, int inner)
9589 {
9590         const struct rte_flow_item_gtp *gtp_m = item->mask;
9591         const struct rte_flow_item_gtp *gtp_v = item->spec;
9592         void *headers_m;
9593         void *headers_v;
9594         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9595                                      misc_parameters_3);
9596         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9597         uint16_t dport = RTE_GTPU_UDP_PORT;
9598
9599         if (inner) {
9600                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9601                                          inner_headers);
9602                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9603         } else {
9604                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9605                                          outer_headers);
9606                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9607         }
9608         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9609                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9610                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9611         }
9612         if (!gtp_v)
9613                 return;
9614         if (!gtp_m)
9615                 gtp_m = &rte_flow_item_gtp_mask;
9616         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9617                  gtp_m->v_pt_rsv_flags);
9618         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9619                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9620         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9621         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9622                  gtp_v->msg_type & gtp_m->msg_type);
9623         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9624                  rte_be_to_cpu_32(gtp_m->teid));
9625         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9626                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9627 }
9628
9629 /**
9630  * Add GTP PSC item to matcher.
9631  *
9632  * @param[in, out] matcher
9633  *   Flow matcher.
9634  * @param[in, out] key
9635  *   Flow matcher value.
9636  * @param[in] item
9637  *   Flow pattern to translate.
9638  */
static int
flow_dv_translate_item_gtp_psc(void *matcher, void *key,
			       const struct rte_flow_item *item)
{
	const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
			misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	/*
	 * Overlay for the GTP-U second dword: sequence number, N-PDU number
	 * and next extension header type.
	 * NOTE(review): the byte-to-field mapping of this union depends on
	 * host byte order in combination with the rte_cpu_to_be_32()
	 * conversions below - confirm behavior on big-endian targets.
	 */
	union {
		uint32_t w32;
		struct {
			uint16_t seq_num;
			uint8_t npdu_num;
			uint8_t next_ext_header_type;
		};
	} dw_2;
	uint8_t gtp_flags;

	/* Always set E-flag match on one, regardless of GTP item settings. */
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
	/* Match next extension header type 0x85 (PDU session container). */
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0xff;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0x85;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	if (gtp_psc_v) {
		/* Overlay for the first dword of the extension header. */
		union {
			uint32_t w32;
			struct {
				uint8_t len;
				uint8_t type_flags;
				uint8_t qfi;
				uint8_t reserved;
			};
		} dw_0;

		/* Set extension header PDU type and QoS flow identifier. */
		if (!gtp_psc_m)
			gtp_psc_m = &rte_flow_item_gtp_psc_mask;
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
		dw_0.qfi = gtp_psc_m->qfi;
		MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
							gtp_psc_m->pdu_type);
		dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
		MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
	}
	return 0;
}
9704
9705 /**
9706  * Add eCPRI item to matcher and to the value.
9707  *
9708  * @param[in] dev
 *   The device to configure through.
9710  * @param[in, out] matcher
9711  *   Flow matcher.
9712  * @param[in, out] key
9713  *   Flow matcher value.
9714  * @param[in] item
9715  *   Flow pattern to translate.
9716  * @param[in] samples
9717  *   Sample IDs to be used in the matching.
9718  */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
			     void *key, const struct rte_flow_item *item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
	struct rte_ecpri_common_hdr common;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	uint32_t *samples;
	void *dw_m;
	void *dw_v;

	/* Nothing to program without a spec. */
	if (!ecpri_v)
		return;
	if (!ecpri_m)
		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * Maximal four DW samples are supported in a single matching now.
	 * Two are used now for a eCPRI matching:
	 * 1. Type: one byte, mask should be 0x00ff0000 in network order
	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
	 *    if any.
	 */
	if (!ecpri_m->hdr.common.u32)
		return;
	/* IDs of the flex parser samples configured for eCPRI at startup. */
	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
	/* Need to take the whole DW as the mask to fill the entry. */
	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
			    prog_sample_field_value_0);
	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
			    prog_sample_field_value_0);
	/* Already big endian (network order) in the header. */
	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
	/* Sample#0, used for matching type, offset 0. */
	MLX5_SET(fte_match_set_misc4, misc4_m,
		 prog_sample_field_id_0, samples[0]);
	/* It makes no sense to set the sample ID in the mask field. */
	MLX5_SET(fte_match_set_misc4, misc4_v,
		 prog_sample_field_id_0, samples[0]);
	/*
	 * Checking if message body part needs to be matched.
	 * Some wildcard rules only matching type field should be supported.
	 */
	if (ecpri_m->hdr.dummy[0]) {
		common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
		switch (common.type) {
		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
			/* Only these message types carry a matchable body. */
			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
					    prog_sample_field_value_1);
			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
					    prog_sample_field_value_1);
			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
					    ecpri_m->hdr.dummy[0];
			/* Sample#1, to match message body, offset 4. */
			MLX5_SET(fte_match_set_misc4, misc4_m,
				 prog_sample_field_id_1, samples[1]);
			MLX5_SET(fte_match_set_misc4, misc4_v,
				 prog_sample_field_id_1, samples[1]);
			break;
		default:
			/* Others, do not match any sample ID. */
			break;
		}
	}
}
9791
9792 /*
9793  * Add connection tracking status item to matcher
9794  *
9795  * @param[in] dev
 *   The device to configure through.
9797  * @param[in, out] matcher
9798  *   Flow matcher.
9799  * @param[in, out] key
9800  *   Flow matcher value.
9801  * @param[in] item
9802  *   Flow pattern to translate.
9803  */
static void
flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
			      void *matcher, void *key,
			      const struct rte_flow_item *item)
{
	uint32_t reg_value = 0;
	int reg_id;
	/* 8LSB 0b 11/0000/11, middle 4 bits are reserved. */
	uint32_t reg_mask = 0;
	const struct rte_flow_item_conntrack *spec = item->spec;
	const struct rte_flow_item_conntrack *mask = item->mask;
	uint32_t flags;
	struct rte_flow_error error;

	if (!mask)
		mask = &rte_flow_item_conntrack_mask;
	/* Nothing to match without a spec or with an all-zero mask. */
	if (!spec || !mask->flags)
		return;
	flags = spec->flags & mask->flags;
	/* The conflict should be checked in the validation. */
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
		reg_value |= MLX5_CT_SYNDROME_VALID;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
		reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
		reg_value |= MLX5_CT_SYNDROME_INVALID;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
		reg_value |= MLX5_CT_SYNDROME_TRAP;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
		reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
	/* VALID/INVALID/DISABLED all live in the two top bits (0xc0). */
	if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
			   RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
			   RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
		reg_mask |= 0xc0;
	if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
		reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
	if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
		reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
	/* The REG_C_x value could be saved during startup. */
	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
	if (reg_id == REG_NON)
		return;
	flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
			       reg_value, reg_mask);
}
9849
/* All-zero reference buffer used to test whether a match criteria
 * sub-header is completely unused.
 */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Evaluates to 1 when the given sub-header of @match_criteria is all zeros. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9855
9856 /**
9857  * Calculate flow matcher enable bitmap.
9858  *
9859  * @param match_criteria
9860  *   Pointer to flow matcher criteria.
9861  *
9862  * @return
9863  *   Bitmap of enabled fields.
9864  */
9865 static uint8_t
9866 flow_dv_matcher_enable(uint32_t *match_criteria)
9867 {
9868         uint8_t match_criteria_enable;
9869
9870         match_criteria_enable =
9871                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9872                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9873         match_criteria_enable |=
9874                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9875                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9876         match_criteria_enable |=
9877                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9878                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9879         match_criteria_enable |=
9880                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9881                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9882         match_criteria_enable |=
9883                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9884                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9885         match_criteria_enable |=
9886                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9887                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9888         return match_criteria_enable;
9889 }
9890
struct mlx5_hlist_entry *
flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
	struct rte_flow_error *error = ctx->error;
	union mlx5_flow_tbl_key key = { .v64 = key64 };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	/* Record the key attributes in the entry for later matching. */
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = !!tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl_data->is_egress = !!key.is_egress;
	tbl_data->is_transfer = !!key.is_fdb;
	tbl_data->dummy = !!key.dummy;
	tbl_data->level = key.level;
	tbl_data->id = key.id;
	tbl = &tbl_data->tbl;
	/* A dummy table needs no underlying flow table object. */
	if (key.dummy)
		return &tbl_data->entry;
	/* Pick the steering domain matching the table type/direction. */
	if (key.is_fdb)
		domain = sh->fdb_domain;
	else if (key.is_egress)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
	if (ret) {
		/* Roll back the pool allocation on failure. */
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* Non-root tables can be jump targets; pre-create the jump action. */
	if (key.level != 0) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			/* Undo the table object and pool allocation. */
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	/* Per-table matcher cache, named after the table attributes. */
	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
	      key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
	      key.level, key.id);
	mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
			     flow_dv_matcher_create_cb,
			     flow_dv_matcher_match_cb,
			     flow_dv_matcher_remove_cb);
	return &tbl_data->entry;
}
9963
9964 int
9965 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9966                      struct mlx5_hlist_entry *entry, uint64_t key64,
9967                      void *cb_ctx __rte_unused)
9968 {
9969         struct mlx5_flow_tbl_data_entry *tbl_data =
9970                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9971         union mlx5_flow_tbl_key key = { .v64 = key64 };
9972
9973         return tbl_data->level != key.level ||
9974                tbl_data->id != key.id ||
9975                tbl_data->dummy != key.dummy ||
9976                tbl_data->is_transfer != !!key.is_fdb ||
9977                tbl_data->is_egress != !!key.is_egress;
9978 }
9979
/**
 * Get a flow table.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] table_level
 *   Table level to use.
 * @param[in] egress
 *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[in] external
 *   Whether the table is used by application-created rules.
 * @param[in] tunnel
 *   Tunnel offload context the table belongs to, NULL if none.
 * @param[in] group_id
 *   Tunnel group id, forwarded to the table creation callback.
 * @param[in] dummy
 *   Dummy entry for dv API.
 * @param[in] table_id
 *   Table id to use.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   Returns tables resource based on the index, NULL in case of failed.
 */
struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_level, uint8_t egress,
			 uint8_t transfer,
			 bool external,
			 const struct mlx5_flow_tunnel *tunnel,
			 uint32_t group_id, uint8_t dummy,
			 uint32_t table_id,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Pack all lookup attributes into the single 64-bit hash key. */
	union mlx5_flow_tbl_key table_key = {
		{
			.level = table_level,
			.id = table_id,
			.reserved = 0,
			.dummy = !!dummy,
			.is_fdb = !!transfer,
			.is_egress = !!egress,
		}
	};
	/* Tunnel parameters are consumed by the table creation callback. */
	struct mlx5_flow_tbl_tunnel_prm tt_prm = {
		.tunnel = tunnel,
		.group_id = group_id,
		.external = external,
	};
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &tt_prm,
	};
	struct mlx5_hlist_entry *entry;
	struct mlx5_flow_tbl_data_entry *tbl_data;

	/* Returns an existing entry or creates one via the create cb. */
	entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot get table");
		return NULL;
	}
	DRV_LOG(DEBUG, "table_level %u table_id %u "
		"tunnel %u group %u registered.",
		table_level, table_id,
		tunnel ? tunnel->tunnel_id : 0, group_id);
	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
	return &tbl_data->tbl;
}
10049
/**
 * Hash list remove callback for flow table entries.
 *
 * Invoked when the last reference to a table is dropped: destroys the
 * jump action and table object, drops the tunnel-offload group
 * reference (if any), destroys the matcher cache list and returns the
 * entry memory to its indexed pool.
 *
 * @param list
 *   Pointer to the hash list; its ctx is the shared device context.
 * @param entry
 *   Hash list entry embedded in struct mlx5_flow_tbl_data_entry.
 */
void
flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
		      struct mlx5_hlist_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	/* Destroy the jump action before the table it refers to. */
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_hlist_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		uint32_t table_level = tbl_data->level;

		/* Tables without a tunnel use the hub-wide group hash. */
		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
		if (he)
			mlx5_hlist_unregister(tunnel_grp_hash, he);
		DRV_LOG(DEBUG,
			"table_level %u id %u tunnel %u group %u released.",
			table_level,
			tbl_data->id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	mlx5_cache_list_destroy(&tbl_data->matchers);
	/* Table entries are allocated from the JUMP indexed pool. */
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
10091
10092 /**
10093  * Release a flow table.
10094  *
10095  * @param[in] sh
10096  *   Pointer to device shared structure.
10097  * @param[in] tbl
10098  *   Table resource to be released.
10099  *
10100  * @return
10101  *   Returns 0 if table was released, else return 1;
10102  */
10103 static int
10104 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10105                              struct mlx5_flow_tbl_resource *tbl)
10106 {
10107         struct mlx5_flow_tbl_data_entry *tbl_data =
10108                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10109
10110         if (!tbl)
10111                 return 0;
10112         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10113 }
10114
10115 int
10116 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
10117                          struct mlx5_cache_entry *entry, void *cb_ctx)
10118 {
10119         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10120         struct mlx5_flow_dv_matcher *ref = ctx->data;
10121         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10122                                                         entry);
10123
10124         return cur->crc != ref->crc ||
10125                cur->priority != ref->priority ||
10126                memcmp((const void *)cur->mask.buf,
10127                       (const void *)ref->mask.buf, ref->mask.size);
10128 }
10129
10130 struct mlx5_cache_entry *
10131 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
10132                           struct mlx5_cache_entry *entry __rte_unused,
10133                           void *cb_ctx)
10134 {
10135         struct mlx5_dev_ctx_shared *sh = list->ctx;
10136         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10137         struct mlx5_flow_dv_matcher *ref = ctx->data;
10138         struct mlx5_flow_dv_matcher *cache;
10139         struct mlx5dv_flow_matcher_attr dv_attr = {
10140                 .type = IBV_FLOW_ATTR_NORMAL,
10141                 .match_mask = (void *)&ref->mask,
10142         };
10143         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10144                                                             typeof(*tbl), tbl);
10145         int ret;
10146
10147         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
10148         if (!cache) {
10149                 rte_flow_error_set(ctx->error, ENOMEM,
10150                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10151                                    "cannot create matcher");
10152                 return NULL;
10153         }
10154         *cache = *ref;
10155         dv_attr.match_criteria_enable =
10156                 flow_dv_matcher_enable(cache->mask.buf);
10157         dv_attr.priority = ref->priority;
10158         if (tbl->is_egress)
10159                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10160         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10161                                                &cache->matcher_object);
10162         if (ret) {
10163                 mlx5_free(cache);
10164                 rte_flow_error_set(ctx->error, ENOMEM,
10165                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10166                                    "cannot create matcher");
10167                 return NULL;
10168         }
10169         return &cache->entry;
10170 }
10171
/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Reference matcher (mask, crc, priority, table) to find or create.
 * @param[in, out] key
 *   Pointer to flow table key.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[in] tunnel
 *   Tunnel offload context, NULL if none.
 * @param[in] group_id
 *   Tunnel group id used when registering the table.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *ref,
			 union mlx5_flow_tbl_key *key,
			 struct mlx5_flow *dev_flow,
			 const struct mlx5_flow_tunnel *tunnel,
			 uint32_t group_id,
			 struct rte_flow_error *error)
{
	struct mlx5_cache_entry *entry;
	struct mlx5_flow_dv_matcher *cache;
	struct mlx5_flow_tbl_resource *tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = ref,
	};

	/**
	 * tunnel offload API requires this registration for cases when
	 * tunnel match rule was inserted before tunnel set rule.
	 */
	tbl = flow_dv_tbl_resource_get(dev, key->level,
				       key->is_egress, key->is_fdb,
				       dev_flow->external, tunnel,
				       group_id, 0, key->id, error);
	if (!tbl)
		return -rte_errno;	/* No need to refill the error info */
	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
	ref->tbl = tbl;
	/* Looks up an equal matcher in the table's cache or creates one. */
	entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
	if (!entry) {
		/* Drop the table reference taken above on failure. */
		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate ref memory");
	}
	cache = container_of(entry, typeof(*cache), entry);
	dev_flow->handle->dvh.matcher = cache;
	return 0;
}
10230
10231 struct mlx5_hlist_entry *
10232 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
10233 {
10234         struct mlx5_dev_ctx_shared *sh = list->ctx;
10235         struct rte_flow_error *error = ctx;
10236         struct mlx5_flow_dv_tag_resource *entry;
10237         uint32_t idx = 0;
10238         int ret;
10239
10240         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10241         if (!entry) {
10242                 rte_flow_error_set(error, ENOMEM,
10243                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10244                                    "cannot allocate resource memory");
10245                 return NULL;
10246         }
10247         entry->idx = idx;
10248         entry->tag_id = key;
10249         ret = mlx5_flow_os_create_flow_action_tag(key,
10250                                                   &entry->action);
10251         if (ret) {
10252                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10253                 rte_flow_error_set(error, ENOMEM,
10254                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10255                                    NULL, "cannot create action");
10256                 return NULL;
10257         }
10258         return &entry->entry;
10259 }
10260
10261 int
10262 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
10263                      struct mlx5_hlist_entry *entry, uint64_t key,
10264                      void *cb_ctx __rte_unused)
10265 {
10266         struct mlx5_flow_dv_tag_resource *tag =
10267                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10268
10269         return key != tag->tag_id;
10270 }
10271
10272 /**
10273  * Find existing tag resource or create and register a new one.
10274  *
10275  * @param dev[in, out]
10276  *   Pointer to rte_eth_dev structure.
10277  * @param[in, out] tag_be24
10278  *   Tag value in big endian then R-shift 8.
10279  * @parm[in, out] dev_flow
10280  *   Pointer to the dev_flow.
10281  * @param[out] error
10282  *   pointer to error structure.
10283  *
10284  * @return
10285  *   0 on success otherwise -errno and errno is set.
10286  */
10287 static int
10288 flow_dv_tag_resource_register
10289                         (struct rte_eth_dev *dev,
10290                          uint32_t tag_be24,
10291                          struct mlx5_flow *dev_flow,
10292                          struct rte_flow_error *error)
10293 {
10294         struct mlx5_priv *priv = dev->data->dev_private;
10295         struct mlx5_flow_dv_tag_resource *cache_resource;
10296         struct mlx5_hlist_entry *entry;
10297
10298         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
10299         if (entry) {
10300                 cache_resource = container_of
10301                         (entry, struct mlx5_flow_dv_tag_resource, entry);
10302                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
10303                 dev_flow->dv.tag_resource = cache_resource;
10304                 return 0;
10305         }
10306         return -rte_errno;
10307 }
10308
10309 void
10310 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
10311                       struct mlx5_hlist_entry *entry)
10312 {
10313         struct mlx5_dev_ctx_shared *sh = list->ctx;
10314         struct mlx5_flow_dv_tag_resource *tag =
10315                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10316
10317         MLX5_ASSERT(tag && sh && tag->action);
10318         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10319         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10320         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10321 }
10322
10323 /**
10324  * Release the tag.
10325  *
10326  * @param dev
10327  *   Pointer to Ethernet device.
10328  * @param tag_idx
10329  *   Tag index.
10330  *
10331  * @return
10332  *   1 while a reference on it exists, 0 when freed.
10333  */
10334 static int
10335 flow_dv_tag_release(struct rte_eth_dev *dev,
10336                     uint32_t tag_idx)
10337 {
10338         struct mlx5_priv *priv = dev->data->dev_private;
10339         struct mlx5_flow_dv_tag_resource *tag;
10340
10341         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10342         if (!tag)
10343                 return 0;
10344         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10345                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10346         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10347 }
10348
/**
 * Translate port ID action to vport.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to the port ID action.
 * @param[out] dst_port_id
 *   The target port ID.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
				 const struct rte_flow_action *action,
				 uint32_t *dst_port_id,
				 struct rte_flow_error *error)
{
	uint32_t port;
	struct mlx5_priv *priv;
	const struct rte_flow_action_port_id *conf =
			(const struct rte_flow_action_port_id *)action->conf;

	/* 'original' selects the flow's own port instead of conf->id. */
	port = conf->original ? dev->data->port_id : conf->id;
	priv = mlx5_port_to_eswitch_info(port, false);
	if (!priv)
		/*
		 * NOTE(review): rte_flow_error_set() expects a positive
		 * errno code; '-rte_errno' looks sign-inverted here —
		 * confirm against the rte_flow API convention.
		 */
		return rte_flow_error_set(error, -rte_errno,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "No eswitch info was found for port");
#ifdef HAVE_MLX5DV_DR_DEVX_PORT
	/*
	 * This parameter is transferred to
	 * mlx5dv_dr_action_create_dest_ib_port().
	 */
	*dst_port_id = priv->dev_port;
#else
	/*
	 * Legacy mode, no LAG configurations is supported.
	 * This parameter is transferred to
	 * mlx5dv_dr_action_create_dest_vport().
	 */
	*dst_port_id = priv->vport_id;
#endif
	return 0;
}
10398
10399 /**
10400  * Create a counter with aging configuration.
10401  *
10402  * @param[in] dev
10403  *   Pointer to rte_eth_dev structure.
10404  * @param[in] dev_flow
10405  *   Pointer to the mlx5_flow.
10406  * @param[out] count
10407  *   Pointer to the counter action configuration.
10408  * @param[in] age
10409  *   Pointer to the aging action configuration.
10410  *
10411  * @return
10412  *   Index to flow counter on success, 0 otherwise.
10413  */
10414 static uint32_t
10415 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10416                                 struct mlx5_flow *dev_flow,
10417                                 const struct rte_flow_action_count *count,
10418                                 const struct rte_flow_action_age *age)
10419 {
10420         uint32_t counter;
10421         struct mlx5_age_param *age_param;
10422
10423         if (count && count->shared)
10424                 counter = flow_dv_counter_get_shared(dev, count->id);
10425         else
10426                 counter = flow_dv_counter_alloc(dev, !!age);
10427         if (!counter || age == NULL)
10428                 return counter;
10429         age_param = flow_dv_counter_idx_get_age(dev, counter);
10430         age_param->context = age->context ? age->context :
10431                 (void *)(uintptr_t)(dev_flow->flow_idx);
10432         age_param->timeout = age->timeout;
10433         age_param->port_id = dev->data->port_id;
10434         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10435         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10436         return counter;
10437 }
10438
/**
 * Add Tx queue matcher
 *
 * @param[in] dev
 *   Pointer to the dev struct.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate (mlx5_rte_flow_item_tx_queue spec/mask).
 */
static void
flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tx_queue *queue_m;
	const struct mlx5_rte_flow_item_tx_queue *queue_v;
	void *misc_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	struct mlx5_txq_ctrl *txq;
	uint32_t queue;


	/* Nothing to match on without both mask and spec. */
	queue_m = (const void *)item->mask;
	if (!queue_m)
		return;
	queue_v = (const void *)item->spec;
	if (!queue_v)
		return;
	txq = mlx5_txq_get(dev, queue_v->queue);
	if (!txq)
		return;
	/* NOTE(review): assumes txq->obj and its SQ exist here — confirm. */
	queue = txq->obj->sq->id;
	/* Match the hardware SQ number in the source_sqn misc field. */
	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
		 queue & queue_m->queue);
	mlx5_txq_release(dev, queue_v->queue);
}
10483
/**
 * Set the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] rss_desc
 *   Pointer to the mlx5_flow_rss_desc.
 */
static void
flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
		       struct mlx5_flow_rss_desc *rss_desc)
{
	uint64_t items = dev_flow->handle->layers;
	int rss_inner = 0;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);

	dev_flow->hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	/* RSS level >= 2 requests hashing on the inner packet headers. */
	if (rss_desc->level >= 2) {
		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
		rss_inner = 1;
	}
#endif
	/* L3: IPv4 — src-only / dst-only / both, per the RSS type flags. */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
			if (rss_types & ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
			else if (rss_types & ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
			else
				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
		/* L3: IPv6 — mirrors the IPv4 selection above. */
		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
			if (rss_types & ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
			else if (rss_types & ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
			else
				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
		}
	}
	/* L4: UDP — src/dst port selection, per the RSS type flags. */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
		if (rss_types & ETH_RSS_UDP) {
			if (rss_types & ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_SRC_PORT_UDP;
			else if (rss_types & ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_DST_PORT_UDP;
			else
				dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
		/* L4: TCP — mirrors the UDP selection above. */
		if (rss_types & ETH_RSS_TCP) {
			if (rss_types & ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_SRC_PORT_TCP;
			else if (rss_types & ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_DST_PORT_TCP;
			else
				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
		}
	}
}
10554
10555 /**
10556  * Prepare an Rx Hash queue.
10557  *
10558  * @param dev
10559  *   Pointer to Ethernet device.
10560  * @param[in] dev_flow
10561  *   Pointer to the mlx5_flow.
10562  * @param[in] rss_desc
10563  *   Pointer to the mlx5_flow_rss_desc.
10564  * @param[out] hrxq_idx
10565  *   Hash Rx queue index.
10566  *
10567  * @return
10568  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10569  */
10570 static struct mlx5_hrxq *
10571 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10572                      struct mlx5_flow *dev_flow,
10573                      struct mlx5_flow_rss_desc *rss_desc,
10574                      uint32_t *hrxq_idx)
10575 {
10576         struct mlx5_priv *priv = dev->data->dev_private;
10577         struct mlx5_flow_handle *dh = dev_flow->handle;
10578         struct mlx5_hrxq *hrxq;
10579
10580         MLX5_ASSERT(rss_desc->queue_num);
10581         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10582         rss_desc->hash_fields = dev_flow->hash_fields;
10583         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10584         rss_desc->shared_rss = 0;
10585         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10586         if (!*hrxq_idx)
10587                 return NULL;
10588         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10589                               *hrxq_idx);
10590         return hrxq;
10591 }
10592
/**
 * Release sample sub action resource.
 *
 * Drops references on each sub-action resource held by the sample
 * action (hash Rx queue, encap/decap, port-id action, tag, jump) and
 * zeroes the corresponding index so the release is idempotent.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] act_res
 *   Pointer to sample sub action resource.
 */
static void
flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
				   struct mlx5_flow_sub_actions_idx *act_res)
{
	if (act_res->rix_hrxq) {
		mlx5_hrxq_release(dev, act_res->rix_hrxq);
		act_res->rix_hrxq = 0;
	}
	if (act_res->rix_encap_decap) {
		flow_dv_encap_decap_resource_release(dev,
						     act_res->rix_encap_decap);
		act_res->rix_encap_decap = 0;
	}
	if (act_res->rix_port_id_action) {
		flow_dv_port_id_action_resource_release(dev,
						act_res->rix_port_id_action);
		act_res->rix_port_id_action = 0;
	}
	if (act_res->rix_tag) {
		flow_dv_tag_release(dev, act_res->rix_tag);
		act_res->rix_tag = 0;
	}
	if (act_res->rix_jump) {
		flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
		act_res->rix_jump = 0;
	}
}
10628
10629 int
10630 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10631                         struct mlx5_cache_entry *entry, void *cb_ctx)
10632 {
10633         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10634         struct rte_eth_dev *dev = ctx->dev;
10635         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10636         struct mlx5_flow_dv_sample_resource *cache_resource =
10637                         container_of(entry, typeof(*cache_resource), entry);
10638
10639         if (resource->ratio == cache_resource->ratio &&
10640             resource->ft_type == cache_resource->ft_type &&
10641             resource->ft_id == cache_resource->ft_id &&
10642             resource->set_action == cache_resource->set_action &&
10643             !memcmp((void *)&resource->sample_act,
10644                     (void *)&cache_resource->sample_act,
10645                     sizeof(struct mlx5_flow_sub_actions_list))) {
10646                 /*
10647                  * Existing sample action should release the prepared
10648                  * sub-actions reference counter.
10649                  */
10650                 flow_dv_sample_sub_actions_release(dev,
10651                                                 &resource->sample_idx);
10652                 return 0;
10653         }
10654         return 1;
10655 }
10656
/**
 * Cache list create callback for sample resources.
 *
 * Allocates a sample resource from the SAMPLE ipool, creates the
 * normal-path table one level below the sampled table, appends the
 * FDB default-miss action when needed, and creates the DR sampler
 * action. On failure all acquired resources are rolled back.
 *
 * @return
 *   Pointer to the new cache entry, NULL on error (ctx->error is set).
 */
struct mlx5_cache_entry *
flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
	void **sample_dv_actions = resource->sub_actions;
	struct mlx5_flow_dv_sample_resource *cache_resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	/* Non-sampled traffic continues in the next table level. */
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = resource->ft_id + next_ft_step;
	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new sample resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*cache_resource = *resource;
	/* Create normal path table level */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					is_egress, is_transfer,
					true, NULL, 0, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	cache_resource->normal_path_tbl = tbl;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		/* FDB sampling requires the shared default-miss action. */
		if (!sh->default_miss_action) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"default miss action was not "
						"created");
			goto error;
		}
		sample_dv_actions[resource->sample_act.actions_num++] =
						sh->default_miss_action;
	}
	/* Create a DR sample action */
	sampler_attr.sample_ratio = cache_resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = cache_resource->set_action;
	if (mlx5_os_flow_dr_create_flow_action_sampler
			(&sampler_attr, &cache_resource->verbs_action)) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	cache_resource->idx = idx;
	cache_resource->dev = dev;
	return &cache_resource->entry;
error:
	/* Roll back in reverse acquisition order. */
	if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
		flow_dv_sample_sub_actions_release(dev,
						   &cache_resource->sample_idx);
	if (cache_resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
				cache_resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
	return NULL;

}
10745
10746 /**
10747  * Find existing sample resource or create and register a new one.
10748  *
10749  * @param[in, out] dev
10750  *   Pointer to rte_eth_dev structure.
10751  * @param[in] resource
10752  *   Pointer to sample resource.
 * @param[in, out] dev_flow
10754  *   Pointer to the dev_flow.
10755  * @param[out] error
10756  *   pointer to error structure.
10757  *
10758  * @return
10759  *   0 on success otherwise -errno and errno is set.
10760  */
10761 static int
10762 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10763                          struct mlx5_flow_dv_sample_resource *resource,
10764                          struct mlx5_flow *dev_flow,
10765                          struct rte_flow_error *error)
10766 {
10767         struct mlx5_flow_dv_sample_resource *cache_resource;
10768         struct mlx5_cache_entry *entry;
10769         struct mlx5_priv *priv = dev->data->dev_private;
10770         struct mlx5_flow_cb_ctx ctx = {
10771                 .dev = dev,
10772                 .error = error,
10773                 .data = resource,
10774         };
10775
10776         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10777         if (!entry)
10778                 return -rte_errno;
10779         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10780         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10781         dev_flow->dv.sample_res = cache_resource;
10782         return 0;
10783 }
10784
10785 int
10786 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10787                             struct mlx5_cache_entry *entry, void *cb_ctx)
10788 {
10789         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10790         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10791         struct rte_eth_dev *dev = ctx->dev;
10792         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10793                         container_of(entry, typeof(*cache_resource), entry);
10794         uint32_t idx = 0;
10795
10796         if (resource->num_of_dest == cache_resource->num_of_dest &&
10797             resource->ft_type == cache_resource->ft_type &&
10798             !memcmp((void *)cache_resource->sample_act,
10799                     (void *)resource->sample_act,
10800                    (resource->num_of_dest *
10801                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10802                 /*
10803                  * Existing sample action should release the prepared
10804                  * sub-actions reference counter.
10805                  */
10806                 for (idx = 0; idx < resource->num_of_dest; idx++)
10807                         flow_dv_sample_sub_actions_release(dev,
10808                                         &resource->sample_idx[idx]);
10809                 return 0;
10810         }
10811         return 1;
10812 }
10813
/*
 * Cache creation callback for destination array resources.
 *
 * Allocates a destination array resource from the ipool, translates each
 * prepared sub-action into an mlx5dv_dr destination attribute and creates
 * the DR "dest array" action. On failure every taken reference and
 * allocation is rolled back and NULL is returned (error details are set
 * through ctx->error).
 */
struct mlx5_cache_entry *
flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_dest_array_resource *cache_resource;
	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx = 0, res_idx = 0;
	struct rte_flow_error *error = ctx->error;
	uint64_t action_flags;
	int ret;

	/* Register new destination array resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
					    &res_idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	/* Snapshot the reference resource, including the sub-action indexes. */
	*cache_resource = *resource;
	/* Select the DR domain matching the flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Build one mlx5dv destination attribute per mirrored destination. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
				 mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(struct mlx5dv_dr_action_dest_attr),
				 0, SOCKET_ID_ANY);
		if (!dest_attr[idx]) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot allocate resource memory");
			goto error;
		}
		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
		sample_act = &resource->sample_act[idx];
		action_flags = sample_act->action_flags;
		/*
		 * action_flags must match one of the combinations below
		 * exactly; the translation step is expected to have built
		 * them that way.
		 */
		switch (action_flags) {
		case MLX5_FLOW_ACTION_QUEUE:
			dest_attr[idx]->dest = sample_act->dr_queue_action;
			break;
		case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
			/* Packet reformat (encap) chained with port redirect. */
			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
			dest_attr[idx]->dest_reformat->reformat =
					sample_act->dr_encap_action;
			dest_attr[idx]->dest_reformat->dest =
					sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_PORT_ID:
			dest_attr[idx]->dest = sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_JUMP:
			dest_attr[idx]->dest = sample_act->dr_jump_action;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL,
					   "unsupported actions type");
			goto error;
		}
	}
	/* create a dest array action */
	ret = mlx5_os_flow_dr_create_flow_action_dest_array
						(domain,
						 cache_resource->num_of_dest,
						 dest_attr,
						 &cache_resource->action);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create destination array action");
		goto error;
	}
	cache_resource->idx = res_idx;
	cache_resource->dev = dev;
	/* The attribute array was only needed for action creation. */
	for (idx = 0; idx < resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return &cache_resource->entry;
error:
	/*
	 * Roll back: release every sub-action reference copied into the
	 * cache entry and free any attributes allocated so far.
	 */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		flow_dv_sample_sub_actions_release(dev,
				&cache_resource->sample_idx[idx]);
		if (dest_attr[idx])
			mlx5_free(dest_attr[idx]);
	}

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
	return NULL;
}
10921
10922 /**
10923  * Find existing destination array resource or create and register a new one.
10924  *
10925  * @param[in, out] dev
10926  *   Pointer to rte_eth_dev structure.
10927  * @param[in] resource
10928  *   Pointer to destination array resource.
 * @param[in, out] dev_flow
10930  *   Pointer to the dev_flow.
10931  * @param[out] error
10932  *   pointer to error structure.
10933  *
10934  * @return
10935  *   0 on success otherwise -errno and errno is set.
10936  */
10937 static int
10938 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10939                          struct mlx5_flow_dv_dest_array_resource *resource,
10940                          struct mlx5_flow *dev_flow,
10941                          struct rte_flow_error *error)
10942 {
10943         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10944         struct mlx5_priv *priv = dev->data->dev_private;
10945         struct mlx5_cache_entry *entry;
10946         struct mlx5_flow_cb_ctx ctx = {
10947                 .dev = dev,
10948                 .error = error,
10949                 .data = resource,
10950         };
10951
10952         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10953         if (!entry)
10954                 return -rte_errno;
10955         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10956         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10957         dev_flow->dv.dest_array_res = cache_resource;
10958         return 0;
10959 }
10960
10961 /**
10962  * Convert Sample action to DV specification.
10963  *
10964  * @param[in] dev
10965  *   Pointer to rte_eth_dev structure.
10966  * @param[in] action
10967  *   Pointer to sample action structure.
10968  * @param[in, out] dev_flow
10969  *   Pointer to the mlx5_flow.
10970  * @param[in] attr
10971  *   Pointer to the flow attributes.
10972  * @param[in, out] num_of_dest
10973  *   Pointer to the num of destination.
10974  * @param[in, out] sample_actions
10975  *   Pointer to sample actions list.
10976  * @param[in, out] res
10977  *   Pointer to sample resource.
10978  * @param[out] error
10979  *   Pointer to the error structure.
10980  *
10981  * @return
10982  *   0 on success, a negative errno value otherwise and rte_errno is set.
10983  */
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
				const struct rte_flow_action_sample *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				uint32_t *num_of_dest,
				void **sample_actions,
				struct mlx5_flow_dv_sample_resource *res,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action *sub_actions;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5_flow_sub_actions_idx *sample_idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5_flow_rss_desc *rss_desc;
	uint64_t action_flags = 0;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	sample_act = &res->sample_act;
	sample_idx = &res->sample_idx;
	res->ratio = action->ratio;
	sub_actions = action->actions;
	/*
	 * Walk the sample sub-action list, translating each entry into a DR
	 * action collected in sample_actions[] and recording the resource
	 * indexes in sample_idx for later release.
	 */
	for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
		int type = sub_actions->type;
		uint32_t pre_rix = 0;
		void *pre_r;
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		{
			const struct rte_flow_action_queue *queue;
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			queue = sub_actions->conf;
			/* A single-queue fate is an RSS table of one entry. */
			rss_desc->queue_num = 1;
			rss_desc->queue[0] = queue->index;
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			/*
			 * NOTE(review): the hrxq index is stored in the
			 * handle only when a MARK sub-action was already
			 * seen earlier in this list — confirm the intended
			 * ordering dependency.
			 */
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS:
		{
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;
			const struct rte_flow_action_rss *rss;
			const uint8_t *rss_key;

			rss = sub_actions->conf;
			memcpy(rss_desc->queue, rss->queue,
			       rss->queue_num * sizeof(uint16_t));
			rss_desc->queue_num = rss->queue_num;
			/* NULL RSS key indicates default RSS key. */
			rss_key = !rss->key ? rss_hash_default_key : rss->key;
			memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
			/*
			 * rss->level and rss.types should be set in advance
			 * when expanding items for RSS.
			 */
			flow_dv_hashfields_set(dev_flow, rss_desc);
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			/* Same ordering note as the QUEUE case above. */
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK:
		{
			uint32_t tag_be = mlx5_flow_mark_set
				(((const struct rte_flow_action_mark *)
				(sub_actions->conf))->id);

			dev_flow->handle->mark = 1;
			pre_rix = dev_flow->handle->dvh.rix_tag;
			/* Save the mark resource before sample */
			pre_r = dev_flow->dv.tag_resource;
			if (flow_dv_tag_resource_register(dev, tag_be,
						  dev_flow, error))
				return -rte_errno;
			MLX5_ASSERT(dev_flow->dv.tag_resource);
			sample_act->dr_tag_action =
				dev_flow->dv.tag_resource->action;
			sample_idx->rix_tag =
				dev_flow->handle->dvh.rix_tag;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_tag_action;
			/* Recover the mark resource after sample */
			dev_flow->dv.tag_resource = pre_r;
			dev_flow->handle->dvh.rix_tag = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT:
		{
			/* The counter is shared for the whole flow rule. */
			if (!flow->counter) {
				flow->counter =
					flow_dv_translate_create_counter(dev,
						dev_flow, sub_actions->conf,
						0);
				if (!flow->counter)
					return rte_flow_error_set
						(error, rte_errno,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"cannot create counter"
						" object.");
			}
			sample_act->dr_cnt_action =
				  (flow_dv_counter_get_by_idx(dev,
				  flow->counter, NULL))->action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_cnt_action;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
		{
			struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
			uint32_t port_id = 0;

			memset(&port_id_resource, 0, sizeof(port_id_resource));
			/* Save the port id resource before sample */
			pre_rix = dev_flow->handle->rix_port_id_action;
			pre_r = dev_flow->dv.port_id_action;
			if (flow_dv_translate_action_port_id(dev, sub_actions,
							     &port_id, error))
				return -rte_errno;
			port_id_resource.port_id = port_id;
			if (flow_dv_port_id_action_resource_register
			    (dev, &port_id_resource, dev_flow, error))
				return -rte_errno;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			sample_idx->rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_port_id_action;
			/* Recover the port id resource after sample */
			dev_flow->dv.port_id_action = pre_r;
			dev_flow->handle->rix_port_id_action = pre_rix;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* Save the encap resource before sample */
			pre_rix = dev_flow->handle->dvh.rix_encap_decap;
			pre_r = dev_flow->dv.encap_decap;
			if (flow_dv_create_action_l2_encap(dev, sub_actions,
							   dev_flow,
							   attr->transfer,
							   error))
				return -rte_errno;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			sample_idx->rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_encap_action;
			/* Recover the encap resource after sample */
			dev_flow->dv.encap_decap = pre_r;
			dev_flow->handle->dvh.rix_encap_decap = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"Not support for sampler action");
		}
	}
	sample_act->action_flags = action_flags;
	res->ft_id = dev_flow->dv.group;
	if (attr->transfer) {
		union {
			uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
			uint64_t set_action;
		} action_ctx = { .set_action = 0 };

		/*
		 * In FDB (transfer) domain, sampled packets must restore
		 * the source vport metadata in REG_C_0 so the peer side
		 * can identify the origin port.
		 */
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		MLX5_SET(set_action_in, action_ctx.action_in, action_type,
			 MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, action_ctx.action_in, field,
			 MLX5_MODI_META_REG_C_0);
		MLX5_SET(set_action_in, action_ctx.action_in, data,
			 priv->vport_meta_tag);
		res->set_action = action_ctx.set_action;
	} else if (attr->ingress) {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	} else {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
	}
	return 0;
}
11213
11214 /**
 * Create the sample action resource, or the destination array resource
 * when the flow mirrors to multiple destinations.
11216  *
11217  * @param[in] dev
11218  *   Pointer to rte_eth_dev structure.
11219  * @param[in, out] dev_flow
11220  *   Pointer to the mlx5_flow.
11221  * @param[in] num_of_dest
11222  *   The num of destination.
11223  * @param[in, out] res
11224  *   Pointer to sample resource.
11225  * @param[in, out] mdest_res
11226  *   Pointer to destination array resource.
11227  * @param[in] sample_actions
11228  *   Pointer to sample path actions list.
11229  * @param[in] action_flags
11230  *   Holds the actions detected until now.
11231  * @param[out] error
11232  *   Pointer to the error structure.
11233  *
11234  * @return
11235  *   0 on success, a negative errno value otherwise and rte_errno is set.
11236  */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
			     struct mlx5_flow *dev_flow,
			     uint32_t num_of_dest,
			     struct mlx5_flow_dv_sample_resource *res,
			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
			     void **sample_actions,
			     uint64_t action_flags,
			     struct rte_flow_error *error)
{
	/* update normal path action resource into last index of array */
	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
	struct mlx5_flow_sub_actions_list *sample_act =
					&mdest_res->sample_act[dest_index];
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	uint32_t normal_idx = 0;
	struct mlx5_hrxq *hrxq;
	uint32_t hrxq_idx;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	/*
	 * With more than one destination the flow needs a destination
	 * array (mirroring); otherwise a plain sample action suffices.
	 */
	if (num_of_dest > 1) {
		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
			/* Handle QP action for mirroring */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
				     (error, rte_errno,
				      RTE_FLOW_ERROR_TYPE_ACTION,
				      NULL,
				      "cannot create rx queue");
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
			sample_act->dr_queue_action = hrxq->action;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
		}
		/*
		 * Move the ownership of the encap/port-id/jump resources
		 * from the flow handle into the dest-array resource (the
		 * handle index is cleared so the flow destroy path does
		 * not release them twice).
		 */
		if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			dev_flow->handle->dvh.rix_encap_decap = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			dev_flow->handle->rix_port_id_action = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_jump =
				dev_flow->handle->rix_jump;
			sample_act->dr_jump_action =
				dev_flow->dv.jump->action;
			dev_flow->handle->rix_jump = 0;
		}
		sample_act->actions_num = normal_idx;
		/* update sample action resource into first index of array */
		mdest_res->ft_type = res->ft_type;
		memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
				sizeof(struct mlx5_flow_sub_actions_idx));
		memcpy(&mdest_res->sample_act[0], &res->sample_act,
				sizeof(struct mlx5_flow_sub_actions_list));
		mdest_res->num_of_dest = num_of_dest;
		if (flow_dv_dest_array_resource_register(dev, mdest_res,
							 dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't create sample "
						  "action");
	} else {
		res->sub_actions = sample_actions;
		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "can't create sample action");
	}
	return 0;
}
11325
11326 /**
11327  * Remove an ASO age action from age actions list.
11328  *
11329  * @param[in] dev
11330  *   Pointer to the Ethernet device structure.
11331  * @param[in] age
11332  *   Pointer to the aso age action handler.
11333  */
11334 static void
11335 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11336                                 struct mlx5_aso_age_action *age)
11337 {
11338         struct mlx5_age_info *age_info;
11339         struct mlx5_age_param *age_param = &age->age_params;
11340         struct mlx5_priv *priv = dev->data->dev_private;
11341         uint16_t expected = AGE_CANDIDATE;
11342
11343         age_info = GET_PORT_AGE_INFO(priv);
11344         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11345                                          AGE_FREE, false, __ATOMIC_RELAXED,
11346                                          __ATOMIC_RELAXED)) {
11347                 /**
11348                  * We need the lock even it is age timeout,
11349                  * since age action may still in process.
11350                  */
11351                 rte_spinlock_lock(&age_info->aged_sl);
11352                 LIST_REMOVE(age, next);
11353                 rte_spinlock_unlock(&age_info->aged_sl);
11354                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11355         }
11356 }
11357
/**
 * Release an ASO age action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] age_idx
 *   Index of ASO age action to release.
 *
 * @return
 *   0 when age action was removed, otherwise the number of references.
 */
static int
flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
	struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
	uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);

	if (!ret) {
		/* Last reference dropped: detach from the aging machinery
		 * and return the action to the manager's free list.
		 */
		flow_dv_aso_age_remove_from_age(dev, age);
		rte_spinlock_lock(&mng->free_sl);
		LIST_INSERT_HEAD(&mng->free, age, next);
		rte_spinlock_unlock(&mng->free_sl);
	}
	return ret;
}
11388
11389 /**
11390  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11391  *
11392  * @param[in] dev
11393  *   Pointer to the Ethernet device structure.
11394  *
11395  * @return
11396  *   0 on success, otherwise negative errno value and rte_errno is set.
11397  */
11398 static int
11399 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11400 {
11401         struct mlx5_priv *priv = dev->data->dev_private;
11402         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11403         void *old_pools = mng->pools;
11404         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11405         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11406         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11407
11408         if (!pools) {
11409                 rte_errno = ENOMEM;
11410                 return -ENOMEM;
11411         }
11412         if (old_pools) {
11413                 memcpy(pools, old_pools,
11414                        mng->n * sizeof(struct mlx5_flow_counter_pool *));
11415                 mlx5_free(old_pools);
11416         } else {
11417                 /* First ASO flow hit allocation - starting ASO data-path. */
11418                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11419
11420                 if (ret) {
11421                         mlx5_free(pools);
11422                         return ret;
11423                 }
11424         }
11425         mng->n = resize;
11426         mng->pools = pools;
11427         return 0;
11428 }
11429
/**
 * Create and initialize a new ASO aging pool.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] age_free
 *   Where to put the pointer of a new age action.
 *
 * @return
 *   The age actions pool pointer and @p age_free is set on success,
 *   NULL otherwise and rte_errno is set.
 */
static struct mlx5_aso_age_pool *
flow_dv_age_pool_create(struct rte_eth_dev *dev,
			struct mlx5_aso_age_action **age_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
	struct mlx5_aso_age_pool *pool = NULL;
	struct mlx5_devx_obj *obj = NULL;
	uint32_t i;

	/* One DevX flow-hit ASO object backs all actions of the pool. */
	obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
						    priv->sh->pdn);
	if (!obj) {
		rte_errno = ENODATA;
		DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		claim_zero(mlx5_devx_cmd_destroy(obj));
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->flow_hit_aso_obj = obj;
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	/* The resize lock serializes index assignment and array growth. */
	rte_spinlock_lock(&mng->resize_sl);
	pool->index = mng->next;
	/* Resize pools array if there is no room for the new pool in it. */
	if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
		claim_zero(mlx5_devx_cmd_destroy(obj));
		mlx5_free(pool);
		rte_spinlock_unlock(&mng->resize_sl);
		return NULL;
	}
	mng->pools[pool->index] = pool;
	mng->next++;
	rte_spinlock_unlock(&mng->resize_sl);
	/* Assign the first action in the new pool, the rest go to free list. */
	*age_free = &pool->actions[0];
	for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
		pool->actions[i].offset = i;
		/* NOTE(review): the free list is touched here without
		 * mng->free_sl - presumably the caller holds it (the
		 * alloc path does); confirm no other caller exists.
		 */
		LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
	}
	return pool;
}
11487
11488 /**
11489  * Allocate a ASO aging bit.
11490  *
11491  * @param[in] dev
11492  *   Pointer to the Ethernet device structure.
11493  * @param[out] error
11494  *   Pointer to the error structure.
11495  *
11496  * @return
11497  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11498  */
11499 static uint32_t
11500 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11501 {
11502         struct mlx5_priv *priv = dev->data->dev_private;
11503         const struct mlx5_aso_age_pool *pool;
11504         struct mlx5_aso_age_action *age_free = NULL;
11505         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11506
11507         MLX5_ASSERT(mng);
11508         /* Try to get the next free age action bit. */
11509         rte_spinlock_lock(&mng->free_sl);
11510         age_free = LIST_FIRST(&mng->free);
11511         if (age_free) {
11512                 LIST_REMOVE(age_free, next);
11513         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11514                 rte_spinlock_unlock(&mng->free_sl);
11515                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11516                                    NULL, "failed to create ASO age pool");
11517                 return 0; /* 0 is an error. */
11518         }
11519         rte_spinlock_unlock(&mng->free_sl);
11520         pool = container_of
11521           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11522                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11523                                                                        actions);
11524         if (!age_free->dr_action) {
11525                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11526                                                  error);
11527
11528                 if (reg_c < 0) {
11529                         rte_flow_error_set(error, rte_errno,
11530                                            RTE_FLOW_ERROR_TYPE_ACTION,
11531                                            NULL, "failed to get reg_c "
11532                                            "for ASO flow hit");
11533                         return 0; /* 0 is an error. */
11534                 }
11535 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11536                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11537                                 (priv->sh->rx_domain,
11538                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11539                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11540                                  (reg_c - REG_C_0));
11541 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11542                 if (!age_free->dr_action) {
11543                         rte_errno = errno;
11544                         rte_spinlock_lock(&mng->free_sl);
11545                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11546                         rte_spinlock_unlock(&mng->free_sl);
11547                         rte_flow_error_set(error, rte_errno,
11548                                            RTE_FLOW_ERROR_TYPE_ACTION,
11549                                            NULL, "failed to create ASO "
11550                                            "flow hit action");
11551                         return 0; /* 0 is an error. */
11552                 }
11553         }
11554         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11555         return pool->index | ((age_free->offset + 1) << 16);
11556 }
11557
11558 /**
11559  * Initialize flow ASO age parameters.
11560  *
11561  * @param[in] dev
11562  *   Pointer to rte_eth_dev structure.
11563  * @param[in] age_idx
11564  *   Index of ASO age action.
11565  * @param[in] context
11566  *   Pointer to flow counter age context.
11567  * @param[in] timeout
11568  *   Aging timeout in seconds.
11569  *
11570  */
11571 static void
11572 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11573                             uint32_t age_idx,
11574                             void *context,
11575                             uint32_t timeout)
11576 {
11577         struct mlx5_aso_age_action *aso_age;
11578
11579         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11580         MLX5_ASSERT(aso_age);
11581         aso_age->age_params.context = context;
11582         aso_age->age_params.timeout = timeout;
11583         aso_age->age_params.port_id = dev->data->port_id;
11584         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11585                          __ATOMIC_RELAXED);
11586         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11587                          __ATOMIC_RELAXED);
11588 }
11589
11590 static void
11591 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11592                                const struct rte_flow_item_integrity *value,
11593                                void *headers_m, void *headers_v)
11594 {
11595         if (mask->l4_ok) {
11596                 /* application l4_ok filter aggregates all hardware l4 filters
11597                  * therefore hw l4_checksum_ok must be implicitly added here.
11598                  */
11599                 struct rte_flow_item_integrity local_item;
11600
11601                 local_item.l4_csum_ok = 1;
11602                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11603                          local_item.l4_csum_ok);
11604                 if (value->l4_ok) {
11605                         /* application l4_ok = 1 matches sets both hw flags
11606                          * l4_ok and l4_checksum_ok flags to 1.
11607                          */
11608                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11609                                  l4_checksum_ok, local_item.l4_csum_ok);
11610                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11611                                  mask->l4_ok);
11612                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11613                                  value->l4_ok);
11614                 } else {
11615                         /* application l4_ok = 0 matches on hw flag
11616                          * l4_checksum_ok = 0 only.
11617                          */
11618                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11619                                  l4_checksum_ok, 0);
11620                 }
11621         } else if (mask->l4_csum_ok) {
11622                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11623                          mask->l4_csum_ok);
11624                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11625                          value->l4_csum_ok);
11626         }
11627 }
11628
11629 static void
11630 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
11631                                const struct rte_flow_item_integrity *value,
11632                                void *headers_m, void *headers_v,
11633                                bool is_ipv4)
11634 {
11635         if (mask->l3_ok) {
11636                 /* application l3_ok filter aggregates all hardware l3 filters
11637                  * therefore hw ipv4_checksum_ok must be implicitly added here.
11638                  */
11639                 struct rte_flow_item_integrity local_item;
11640
11641                 local_item.ipv4_csum_ok = !!is_ipv4;
11642                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11643                          local_item.ipv4_csum_ok);
11644                 if (value->l3_ok) {
11645                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11646                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
11647                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
11648                                  mask->l3_ok);
11649                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
11650                                  value->l3_ok);
11651                 } else {
11652                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11653                                  ipv4_checksum_ok, 0);
11654                 }
11655         } else if (mask->ipv4_csum_ok) {
11656                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11657                          mask->ipv4_csum_ok);
11658                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11659                          value->ipv4_csum_ok);
11660         }
11661 }
11662
/**
 * Translate an integrity item into the flow matcher and flow key.
 *
 * Selects inner or outer headers according to @p value->level and
 * delegates the actual bit translation to the L3 and L4 helpers.
 *
 * @param[in, out] matcher
 *   Flow matcher (mask side).
 * @param[in, out] key
 *   Flow key (value side).
 * @param[in] head_item
 *   First item of the flow pattern, used to locate the L3 protocol.
 * @param[in] integrity_item
 *   The integrity item to translate.
 */
static void
flow_dv_translate_item_integrity(void *matcher, void *key,
				 const struct rte_flow_item *head_item,
				 const struct rte_flow_item *integrity_item)
{
	const struct rte_flow_item_integrity *mask = integrity_item->mask;
	const struct rte_flow_item_integrity *value = integrity_item->spec;
	const struct rte_flow_item *tunnel_item, *end_item, *item;
	void *headers_m;
	void *headers_v;
	uint32_t l3_protocol;

	if (!value)
		return;
	if (!mask)
		mask = &rte_flow_item_integrity_mask;
	/* level > 1 addresses the inner packet of a tunnel. */
	if (value->level > 1) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	tunnel_item = mlx5_flow_find_tunnel_item(head_item);
	if (value->level > 1) {
		/* tunnel item was verified during the item validation */
		item = tunnel_item;
		end_item = mlx5_find_end_item(tunnel_item);
	} else {
		item = head_item;
		/* Outer case: stop the scan at the tunnel item (if any) so
		 * only outer-part items are examined for the L3 protocol.
		 */
		end_item = tunnel_item ? tunnel_item :
			   mlx5_find_end_item(integrity_item);
	}
	/* The L3 protocol is only needed when l3_ok is being matched. */
	l3_protocol = mask->l3_ok ?
		      mlx5_flow_locate_proto_l3(&item, end_item) : 0;
	flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
				       l3_protocol == RTE_ETHER_TYPE_IPV4);
	flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
}
11704
11705 /**
11706  * Prepares DV flow counter with aging configuration.
11707  * Gets it by index when exists, creates a new one when doesn't.
11708  *
11709  * @param[in] dev
11710  *   Pointer to rte_eth_dev structure.
11711  * @param[in] dev_flow
11712  *   Pointer to the mlx5_flow.
11713  * @param[in, out] flow
11714  *   Pointer to the sub flow.
11715  * @param[in] count
11716  *   Pointer to the counter action configuration.
11717  * @param[in] age
11718  *   Pointer to the aging action configuration.
11719  * @param[out] error
11720  *   Pointer to the error structure.
11721  *
11722  * @return
11723  *   Pointer to the counter, NULL otherwise.
11724  */
11725 static struct mlx5_flow_counter *
11726 flow_dv_prepare_counter(struct rte_eth_dev *dev,
11727                         struct mlx5_flow *dev_flow,
11728                         struct rte_flow *flow,
11729                         const struct rte_flow_action_count *count,
11730                         const struct rte_flow_action_age *age,
11731                         struct rte_flow_error *error)
11732 {
11733         if (!flow->counter) {
11734                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
11735                                                                  count, age);
11736                 if (!flow->counter) {
11737                         rte_flow_error_set(error, rte_errno,
11738                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11739                                            "cannot create counter object.");
11740                         return NULL;
11741                 }
11742         }
11743         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11744 }
11745
/*
 * Release an ASO CT action by its own device.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   Index of ASO CT action to release.
 *
 * @return
 *   0 when CT action was removed, otherwise the number of references,
 *   or -1 when the action is still being handled by the ASO SQ.
 */
static inline int
flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	uint32_t ret;
	struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
	enum mlx5_aso_ct_state state =
			__atomic_load_n(&ct->state, __ATOMIC_RELAXED);

	/* Cannot release when CT is in the ASO SQ. */
	if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
		return -1;
	ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
	if (!ret) {
		/* Last reference: destroy both direction DR actions (when
		 * built with ASO CT support) and recycle the object.
		 */
		if (ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			claim_zero(mlx5_glue->destroy_flow_action
					(ct->dr_action_orig));
#endif
			ct->dr_action_orig = NULL;
		}
		if (ct->dr_action_rply) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			claim_zero(mlx5_glue->destroy_flow_action
					(ct->dr_action_rply));
#endif
			ct->dr_action_rply = NULL;
		}
		/* Clear the state to free, no need in 1st allocation. */
		MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
		rte_spinlock_lock(&mng->ct_sl);
		LIST_INSERT_HEAD(&mng->free_cts, ct, next);
		rte_spinlock_unlock(&mng->ct_sl);
	}
	return (int)ret;
}
11794
11795 static inline int
11796 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
11797 {
11798         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
11799         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
11800         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
11801         RTE_SET_USED(dev);
11802
11803         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
11804         if (dev->data->dev_started != 1)
11805                 return -1;
11806         return flow_dv_aso_ct_dev_release(owndev, idx);
11807 }
11808
11809 /*
11810  * Resize the ASO CT pools array by 64 pools.
11811  *
11812  * @param[in] dev
11813  *   Pointer to the Ethernet device structure.
11814  *
11815  * @return
11816  *   0 on success, otherwise negative errno value and rte_errno is set.
11817  */
11818 static int
11819 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
11820 {
11821         struct mlx5_priv *priv = dev->data->dev_private;
11822         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11823         void *old_pools = mng->pools;
11824         /* Magic number now, need a macro. */
11825         uint32_t resize = mng->n + 64;
11826         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
11827         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11828
11829         if (!pools) {
11830                 rte_errno = ENOMEM;
11831                 return -rte_errno;
11832         }
11833         rte_rwlock_write_lock(&mng->resize_rwl);
11834         /* ASO SQ/QP was already initialized in the startup. */
11835         if (old_pools) {
11836                 /* Realloc could be an alternative choice. */
11837                 rte_memcpy(pools, old_pools,
11838                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
11839                 mlx5_free(old_pools);
11840         }
11841         mng->n = resize;
11842         mng->pools = pools;
11843         rte_rwlock_write_unlock(&mng->resize_rwl);
11844         return 0;
11845 }
11846
/*
 * Create and initialize a new ASO CT pool.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] ct_free
 *   Where to put the pointer of a new CT action.
 *
 * @return
 *   The CT actions pool pointer and @p ct_free is set on success,
 *   NULL otherwise and rte_errno is set.
 */
static struct mlx5_aso_ct_pool *
flow_dv_ct_pool_create(struct rte_eth_dev *dev,
		       struct mlx5_aso_ct_action **ct_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	struct mlx5_aso_ct_pool *pool = NULL;
	struct mlx5_devx_obj *obj = NULL;
	uint32_t i;
	uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);

	/* One DevX CT offload object backs all actions of this pool. */
	obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
						priv->sh->pdn, log_obj_size);
	if (!obj) {
		rte_errno = ENODATA;
		DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		claim_zero(mlx5_devx_cmd_destroy(obj));
		return NULL;
	}
	pool->devx_obj = obj;
	pool->index = mng->next;
	/* Resize pools array if there is no room for the new pool in it. */
	if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
		claim_zero(mlx5_devx_cmd_destroy(obj));
		mlx5_free(pool);
		return NULL;
	}
	mng->pools[pool->index] = pool;
	mng->next++;
	/* Assign the first action in the new pool, the rest go to free list. */
	*ct_free = &pool->actions[0];
	/* Lock outside, the list operation is safe here. */
	for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
		/* refcnt is 0 when allocating the memory. */
		pool->actions[i].offset = i;
		LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
	}
	return pool;
}
11903
/*
 * Allocate a ASO CT action from free list.
 *
 * Pops a free CT action (creating a new pool when the free list is
 * empty) and lazily creates the two per-direction DR ASO actions on
 * first use.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	struct mlx5_aso_ct_action *ct = NULL;
	struct mlx5_aso_ct_pool *pool;
	uint8_t reg_c;
	uint32_t ct_idx;

	MLX5_ASSERT(mng);
	/* ASO CT needs DevX command support. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get a free CT action, if no, a new pool will be created. */
	rte_spinlock_lock(&mng->ct_sl);
	ct = LIST_FIRST(&mng->free_cts);
	if (ct) {
		LIST_REMOVE(ct, next);
	} else if (!flow_dv_ct_pool_create(dev, &ct)) {
		rte_spinlock_unlock(&mng->ct_sl);
		rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "failed to create ASO CT pool");
		return 0;
	}
	rte_spinlock_unlock(&mng->ct_sl);
	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
	ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
	/* 0: inactive, 1: created, 2+: used by flows. */
	__atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
	/* NOTE(review): reg_c is a uint8_t but mlx5_flow_get_reg_id() may
	 * return a negative error - the result is used unchecked in
	 * (reg_c - REG_C_0) below; confirm the CONNTRACK register is
	 * always available on this path.
	 */
	reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
	if (!ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
		ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
			(priv->sh->rx_domain, pool->devx_obj->obj,
			 ct->offset,
			 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
			 reg_c - REG_C_0);
#else
		RTE_SET_USED(reg_c);
#endif
		if (!ct->dr_action_orig) {
			/* Put the action back via the regular release path. */
			flow_dv_aso_ct_dev_release(dev, ct_idx);
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "failed to create ASO CT action");
			return 0;
		}
	}
	if (!ct->dr_action_rply) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
		ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
			(priv->sh->rx_domain, pool->devx_obj->obj,
			 ct->offset,
			 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
			 reg_c - REG_C_0);
#endif
		if (!ct->dr_action_rply) {
			flow_dv_aso_ct_dev_release(dev, ct_idx);
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "failed to create ASO CT action");
			return 0;
		}
	}
	return ct_idx;
}
11983
11984 /*
11985  * Create a conntrack object with context and actions by using ASO mechanism.
11986  *
11987  * @param[in] dev
11988  *   Pointer to rte_eth_dev structure.
11989  * @param[in] pro
11990  *   Pointer to conntrack information profile.
11991  * @param[out] error
11992  *   Pointer to the error structure.
11993  *
11994  * @return
11995  *   Index to conntrack object on success, 0 otherwise.
11996  */
11997 static uint32_t
11998 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
11999                                    const struct rte_flow_action_conntrack *pro,
12000                                    struct rte_flow_error *error)
12001 {
12002         struct mlx5_priv *priv = dev->data->dev_private;
12003         struct mlx5_dev_ctx_shared *sh = priv->sh;
12004         struct mlx5_aso_ct_action *ct;
12005         uint32_t idx;
12006
12007         if (!sh->ct_aso_en)
12008                 return rte_flow_error_set(error, ENOTSUP,
12009                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12010                                           "Connection is not supported");
12011         idx = flow_dv_aso_ct_alloc(dev, error);
12012         if (!idx)
12013                 return rte_flow_error_set(error, rte_errno,
12014                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12015                                           "Failed to allocate CT object");
12016         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12017         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12018                 return rte_flow_error_set(error, EBUSY,
12019                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12020                                           "Failed to update CT");
12021         ct->is_original = !!pro->is_original_dir;
12022         ct->peer = pro->peer_port;
12023         return idx;
12024 }
12025
12026 /**
12027  * Fill the flow with DV spec, lock free
12028  * (mutex should be acquired by caller).
12029  *
12030  * @param[in] dev
12031  *   Pointer to rte_eth_dev structure.
12032  * @param[in, out] dev_flow
12033  *   Pointer to the sub flow.
12034  * @param[in] attr
12035  *   Pointer to the flow attributes.
12036  * @param[in] items
12037  *   Pointer to the list of items.
12038  * @param[in] actions
12039  *   Pointer to the list of actions.
12040  * @param[out] error
12041  *   Pointer to the error structure.
12042  *
12043  * @return
12044  *   0 on success, a negative errno value otherwise and rte_errno is set.
12045  */
12046 static int
12047 flow_dv_translate(struct rte_eth_dev *dev,
12048                   struct mlx5_flow *dev_flow,
12049                   const struct rte_flow_attr *attr,
12050                   const struct rte_flow_item items[],
12051                   const struct rte_flow_action actions[],
12052                   struct rte_flow_error *error)
12053 {
12054         struct mlx5_priv *priv = dev->data->dev_private;
12055         struct mlx5_dev_config *dev_conf = &priv->config;
12056         struct rte_flow *flow = dev_flow->flow;
12057         struct mlx5_flow_handle *handle = dev_flow->handle;
12058         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12059         struct mlx5_flow_rss_desc *rss_desc;
12060         uint64_t item_flags = 0;
12061         uint64_t last_item = 0;
12062         uint64_t action_flags = 0;
12063         struct mlx5_flow_dv_matcher matcher = {
12064                 .mask = {
12065                         .size = sizeof(matcher.mask.buf) -
12066                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
12067                 },
12068         };
12069         int actions_n = 0;
12070         bool actions_end = false;
12071         union {
12072                 struct mlx5_flow_dv_modify_hdr_resource res;
12073                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12074                             sizeof(struct mlx5_modification_cmd) *
12075                             (MLX5_MAX_MODIFY_NUM + 1)];
12076         } mhdr_dummy;
12077         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12078         const struct rte_flow_action_count *count = NULL;
12079         const struct rte_flow_action_age *non_shared_age = NULL;
12080         union flow_dv_attr flow_attr = { .attr = 0 };
12081         uint32_t tag_be;
12082         union mlx5_flow_tbl_key tbl_key;
12083         uint32_t modify_action_position = UINT32_MAX;
12084         void *match_mask = matcher.mask.buf;
12085         void *match_value = dev_flow->dv.value.buf;
12086         uint8_t next_protocol = 0xff;
12087         struct rte_vlan_hdr vlan = { 0 };
12088         struct mlx5_flow_dv_dest_array_resource mdest_res;
12089         struct mlx5_flow_dv_sample_resource sample_res;
12090         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12091         const struct rte_flow_action_sample *sample = NULL;
12092         struct mlx5_flow_sub_actions_list *sample_act;
12093         uint32_t sample_act_pos = UINT32_MAX;
12094         uint32_t age_act_pos = UINT32_MAX;
12095         uint32_t num_of_dest = 0;
12096         int tmp_actions_n = 0;
12097         uint32_t table;
12098         int ret = 0;
12099         const struct mlx5_flow_tunnel *tunnel = NULL;
12100         struct flow_grp_info grp_info = {
12101                 .external = !!dev_flow->external,
12102                 .transfer = !!attr->transfer,
12103                 .fdb_def_rule = !!priv->fdb_def_rule,
12104                 .skip_scale = dev_flow->skip_scale &
12105                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12106                 .std_tbl_fix = true,
12107         };
12108         const struct rte_flow_item *head_item = items;
12109
12110         if (!wks)
12111                 return rte_flow_error_set(error, ENOMEM,
12112                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12113                                           NULL,
12114                                           "failed to push flow workspace");
12115         rss_desc = &wks->rss_desc;
12116         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12117         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12118         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12119                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12120         /* update normal path action resource into last index of array */
12121         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12122         if (is_tunnel_offload_active(dev)) {
12123                 if (dev_flow->tunnel) {
12124                         RTE_VERIFY(dev_flow->tof_type ==
12125                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12126                         tunnel = dev_flow->tunnel;
12127                 } else {
12128                         tunnel = mlx5_get_tof(items, actions,
12129                                               &dev_flow->tof_type);
12130                         dev_flow->tunnel = tunnel;
12131                 }
12132                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12133                                         (dev, attr, tunnel, dev_flow->tof_type);
12134         }
12135         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12136                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12137         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12138                                        &grp_info, error);
12139         if (ret)
12140                 return ret;
12141         dev_flow->dv.group = table;
12142         if (attr->transfer)
12143                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12144         /* number of actions must be set to 0 in case of dirty stack. */
12145         mhdr_res->actions_num = 0;
12146         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12147                 /*
12148                  * do not add decap action if match rule drops packet
12149                  * HW rejects rules with decap & drop
12150                  *
12151                  * if tunnel match rule was inserted before matching tunnel set
12152                  * rule flow table used in the match rule must be registered.
12153                  * current implementation handles that in the
12154                  * flow_dv_match_register() at the function end.
12155                  */
12156                 bool add_decap = true;
12157                 const struct rte_flow_action *ptr = actions;
12158
12159                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12160                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12161                                 add_decap = false;
12162                                 break;
12163                         }
12164                 }
12165                 if (add_decap) {
12166                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12167                                                            attr->transfer,
12168                                                            error))
12169                                 return -rte_errno;
12170                         dev_flow->dv.actions[actions_n++] =
12171                                         dev_flow->dv.encap_decap->action;
12172                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12173                 }
12174         }
12175         for (; !actions_end ; actions++) {
12176                 const struct rte_flow_action_queue *queue;
12177                 const struct rte_flow_action_rss *rss;
12178                 const struct rte_flow_action *action = actions;
12179                 const uint8_t *rss_key;
12180                 struct mlx5_flow_tbl_resource *tbl;
12181                 struct mlx5_aso_age_action *age_act;
12182                 struct mlx5_flow_counter *cnt_act;
12183                 uint32_t port_id = 0;
12184                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12185                 int action_type = actions->type;
12186                 const struct rte_flow_action *found_action = NULL;
12187                 uint32_t jump_group = 0;
12188                 uint32_t owner_idx;
12189                 struct mlx5_aso_ct_action *ct;
12190
12191                 if (!mlx5_flow_os_action_supported(action_type))
12192                         return rte_flow_error_set(error, ENOTSUP,
12193                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12194                                                   actions,
12195                                                   "action not supported");
12196                 switch (action_type) {
12197                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12198                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12199                         break;
12200                 case RTE_FLOW_ACTION_TYPE_VOID:
12201                         break;
12202                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12203                         if (flow_dv_translate_action_port_id(dev, action,
12204                                                              &port_id, error))
12205                                 return -rte_errno;
12206                         port_id_resource.port_id = port_id;
12207                         MLX5_ASSERT(!handle->rix_port_id_action);
12208                         if (flow_dv_port_id_action_resource_register
12209                             (dev, &port_id_resource, dev_flow, error))
12210                                 return -rte_errno;
12211                         dev_flow->dv.actions[actions_n++] =
12212                                         dev_flow->dv.port_id_action->action;
12213                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12214                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12215                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12216                         num_of_dest++;
12217                         break;
12218                 case RTE_FLOW_ACTION_TYPE_FLAG:
12219                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12220                         dev_flow->handle->mark = 1;
12221                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12222                                 struct rte_flow_action_mark mark = {
12223                                         .id = MLX5_FLOW_MARK_DEFAULT,
12224                                 };
12225
12226                                 if (flow_dv_convert_action_mark(dev, &mark,
12227                                                                 mhdr_res,
12228                                                                 error))
12229                                         return -rte_errno;
12230                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12231                                 break;
12232                         }
12233                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12234                         /*
12235                          * Only one FLAG or MARK is supported per device flow
12236                          * right now. So the pointer to the tag resource must be
12237                          * zero before the register process.
12238                          */
12239                         MLX5_ASSERT(!handle->dvh.rix_tag);
12240                         if (flow_dv_tag_resource_register(dev, tag_be,
12241                                                           dev_flow, error))
12242                                 return -rte_errno;
12243                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12244                         dev_flow->dv.actions[actions_n++] =
12245                                         dev_flow->dv.tag_resource->action;
12246                         break;
12247                 case RTE_FLOW_ACTION_TYPE_MARK:
12248                         action_flags |= MLX5_FLOW_ACTION_MARK;
12249                         dev_flow->handle->mark = 1;
12250                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12251                                 const struct rte_flow_action_mark *mark =
12252                                         (const struct rte_flow_action_mark *)
12253                                                 actions->conf;
12254
12255                                 if (flow_dv_convert_action_mark(dev, mark,
12256                                                                 mhdr_res,
12257                                                                 error))
12258                                         return -rte_errno;
12259                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12260                                 break;
12261                         }
12262                         /* Fall-through */
12263                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12264                         /* Legacy (non-extensive) MARK action. */
12265                         tag_be = mlx5_flow_mark_set
12266                               (((const struct rte_flow_action_mark *)
12267                                (actions->conf))->id);
12268                         MLX5_ASSERT(!handle->dvh.rix_tag);
12269                         if (flow_dv_tag_resource_register(dev, tag_be,
12270                                                           dev_flow, error))
12271                                 return -rte_errno;
12272                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12273                         dev_flow->dv.actions[actions_n++] =
12274                                         dev_flow->dv.tag_resource->action;
12275                         break;
12276                 case RTE_FLOW_ACTION_TYPE_SET_META:
12277                         if (flow_dv_convert_action_set_meta
12278                                 (dev, mhdr_res, attr,
12279                                  (const struct rte_flow_action_set_meta *)
12280                                   actions->conf, error))
12281                                 return -rte_errno;
12282                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12283                         break;
12284                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12285                         if (flow_dv_convert_action_set_tag
12286                                 (dev, mhdr_res,
12287                                  (const struct rte_flow_action_set_tag *)
12288                                   actions->conf, error))
12289                                 return -rte_errno;
12290                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12291                         break;
12292                 case RTE_FLOW_ACTION_TYPE_DROP:
12293                         action_flags |= MLX5_FLOW_ACTION_DROP;
12294                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12295                         break;
12296                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12297                         queue = actions->conf;
12298                         rss_desc->queue_num = 1;
12299                         rss_desc->queue[0] = queue->index;
12300                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12301                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12302                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12303                         num_of_dest++;
12304                         break;
12305                 case RTE_FLOW_ACTION_TYPE_RSS:
12306                         rss = actions->conf;
12307                         memcpy(rss_desc->queue, rss->queue,
12308                                rss->queue_num * sizeof(uint16_t));
12309                         rss_desc->queue_num = rss->queue_num;
12310                         /* NULL RSS key indicates default RSS key. */
12311                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12312                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12313                         /*
12314                          * rss->level and rss.types should be set in advance
12315                          * when expanding items for RSS.
12316                          */
12317                         action_flags |= MLX5_FLOW_ACTION_RSS;
12318                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12319                                 MLX5_FLOW_FATE_SHARED_RSS :
12320                                 MLX5_FLOW_FATE_QUEUE;
12321                         break;
12322                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12323                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12324                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12325                         __atomic_fetch_add(&age_act->refcnt, 1,
12326                                            __ATOMIC_RELAXED);
12327                         age_act_pos = actions_n++;
12328                         action_flags |= MLX5_FLOW_ACTION_AGE;
12329                         break;
12330                 case RTE_FLOW_ACTION_TYPE_AGE:
12331                         non_shared_age = action->conf;
12332                         age_act_pos = actions_n++;
12333                         action_flags |= MLX5_FLOW_ACTION_AGE;
12334                         break;
12335                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12336                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12337                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12338                                                              NULL);
12339                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12340                                            __ATOMIC_RELAXED);
12341                         /* Save information first, will apply later. */
12342                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12343                         break;
12344                 case RTE_FLOW_ACTION_TYPE_COUNT:
12345                         if (!dev_conf->devx) {
12346                                 return rte_flow_error_set
12347                                               (error, ENOTSUP,
12348                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12349                                                NULL,
12350                                                "count action not supported");
12351                         }
12352                         /* Save information first, will apply later. */
12353                         count = action->conf;
12354                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12355                         break;
12356                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12357                         dev_flow->dv.actions[actions_n++] =
12358                                                 priv->sh->pop_vlan_action;
12359                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12360                         break;
12361                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12362                         if (!(action_flags &
12363                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12364                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12365                         vlan.eth_proto = rte_be_to_cpu_16
12366                              ((((const struct rte_flow_action_of_push_vlan *)
12367                                                    actions->conf)->ethertype));
12368                         found_action = mlx5_flow_find_action
12369                                         (actions + 1,
12370                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12371                         if (found_action)
12372                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12373                         found_action = mlx5_flow_find_action
12374                                         (actions + 1,
12375                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12376                         if (found_action)
12377                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12378                         if (flow_dv_create_action_push_vlan
12379                                             (dev, attr, &vlan, dev_flow, error))
12380                                 return -rte_errno;
12381                         dev_flow->dv.actions[actions_n++] =
12382                                         dev_flow->dv.push_vlan_res->action;
12383                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12384                         break;
12385                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12386                         /* of_vlan_push action handled this action */
12387                         MLX5_ASSERT(action_flags &
12388                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12389                         break;
12390                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12391                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12392                                 break;
12393                         flow_dev_get_vlan_info_from_items(items, &vlan);
12394                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12395                         /* If no VLAN push - this is a modify header action */
12396                         if (flow_dv_convert_action_modify_vlan_vid
12397                                                 (mhdr_res, actions, error))
12398                                 return -rte_errno;
12399                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12400                         break;
12401                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12402                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12403                         if (flow_dv_create_action_l2_encap(dev, actions,
12404                                                            dev_flow,
12405                                                            attr->transfer,
12406                                                            error))
12407                                 return -rte_errno;
12408                         dev_flow->dv.actions[actions_n++] =
12409                                         dev_flow->dv.encap_decap->action;
12410                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12411                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12412                                 sample_act->action_flags |=
12413                                                         MLX5_FLOW_ACTION_ENCAP;
12414                         break;
12415                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12416                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12417                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12418                                                            attr->transfer,
12419                                                            error))
12420                                 return -rte_errno;
12421                         dev_flow->dv.actions[actions_n++] =
12422                                         dev_flow->dv.encap_decap->action;
12423                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12424                         break;
12425                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12426                         /* Handle encap with preceding decap. */
12427                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12428                                 if (flow_dv_create_action_raw_encap
12429                                         (dev, actions, dev_flow, attr, error))
12430                                         return -rte_errno;
12431                                 dev_flow->dv.actions[actions_n++] =
12432                                         dev_flow->dv.encap_decap->action;
12433                         } else {
12434                                 /* Handle encap without preceding decap. */
12435                                 if (flow_dv_create_action_l2_encap
12436                                     (dev, actions, dev_flow, attr->transfer,
12437                                      error))
12438                                         return -rte_errno;
12439                                 dev_flow->dv.actions[actions_n++] =
12440                                         dev_flow->dv.encap_decap->action;
12441                         }
12442                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12443                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12444                                 sample_act->action_flags |=
12445                                                         MLX5_FLOW_ACTION_ENCAP;
12446                         break;
12447                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12448                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12449                                 ;
12450                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12451                                 if (flow_dv_create_action_l2_decap
12452                                     (dev, dev_flow, attr->transfer, error))
12453                                         return -rte_errno;
12454                                 dev_flow->dv.actions[actions_n++] =
12455                                         dev_flow->dv.encap_decap->action;
12456                         }
12457                         /* If decap is followed by encap, handle it at encap. */
12458                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12459                         break;
12460                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12461                         dev_flow->dv.actions[actions_n++] =
12462                                 (void *)(uintptr_t)action->conf;
12463                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12464                         break;
12465                 case RTE_FLOW_ACTION_TYPE_JUMP:
12466                         jump_group = ((const struct rte_flow_action_jump *)
12467                                                         action->conf)->group;
12468                         grp_info.std_tbl_fix = 0;
12469                         if (dev_flow->skip_scale &
12470                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12471                                 grp_info.skip_scale = 1;
12472                         else
12473                                 grp_info.skip_scale = 0;
12474                         ret = mlx5_flow_group_to_table(dev, tunnel,
12475                                                        jump_group,
12476                                                        &table,
12477                                                        &grp_info, error);
12478                         if (ret)
12479                                 return ret;
12480                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12481                                                        attr->transfer,
12482                                                        !!dev_flow->external,
12483                                                        tunnel, jump_group, 0,
12484                                                        0, error);
12485                         if (!tbl)
12486                                 return rte_flow_error_set
12487                                                 (error, errno,
12488                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12489                                                  NULL,
12490                                                  "cannot create jump action.");
12491                         if (flow_dv_jump_tbl_resource_register
12492                             (dev, tbl, dev_flow, error)) {
12493                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12494                                 return rte_flow_error_set
12495                                                 (error, errno,
12496                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12497                                                  NULL,
12498                                                  "cannot create jump action.");
12499                         }
12500                         dev_flow->dv.actions[actions_n++] =
12501                                         dev_flow->dv.jump->action;
12502                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12503                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12504                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12505                         num_of_dest++;
12506                         break;
12507                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12508                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12509                         if (flow_dv_convert_action_modify_mac
12510                                         (mhdr_res, actions, error))
12511                                 return -rte_errno;
12512                         action_flags |= actions->type ==
12513                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12514                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12515                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12516                         break;
12517                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12518                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12519                         if (flow_dv_convert_action_modify_ipv4
12520                                         (mhdr_res, actions, error))
12521                                 return -rte_errno;
12522                         action_flags |= actions->type ==
12523                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12524                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12525                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12526                         break;
12527                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12528                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12529                         if (flow_dv_convert_action_modify_ipv6
12530                                         (mhdr_res, actions, error))
12531                                 return -rte_errno;
12532                         action_flags |= actions->type ==
12533                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12534                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12535                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12536                         break;
12537                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12538                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12539                         if (flow_dv_convert_action_modify_tp
12540                                         (mhdr_res, actions, items,
12541                                          &flow_attr, dev_flow, !!(action_flags &
12542                                          MLX5_FLOW_ACTION_DECAP), error))
12543                                 return -rte_errno;
12544                         action_flags |= actions->type ==
12545                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12546                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12547                                         MLX5_FLOW_ACTION_SET_TP_DST;
12548                         break;
12549                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12550                         if (flow_dv_convert_action_modify_dec_ttl
12551                                         (mhdr_res, items, &flow_attr, dev_flow,
12552                                          !!(action_flags &
12553                                          MLX5_FLOW_ACTION_DECAP), error))
12554                                 return -rte_errno;
12555                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12556                         break;
12557                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12558                         if (flow_dv_convert_action_modify_ttl
12559                                         (mhdr_res, actions, items, &flow_attr,
12560                                          dev_flow, !!(action_flags &
12561                                          MLX5_FLOW_ACTION_DECAP), error))
12562                                 return -rte_errno;
12563                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12564                         break;
12565                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12566                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12567                         if (flow_dv_convert_action_modify_tcp_seq
12568                                         (mhdr_res, actions, error))
12569                                 return -rte_errno;
12570                         action_flags |= actions->type ==
12571                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12572                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12573                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12574                         break;
12575
12576                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12577                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12578                         if (flow_dv_convert_action_modify_tcp_ack
12579                                         (mhdr_res, actions, error))
12580                                 return -rte_errno;
12581                         action_flags |= actions->type ==
12582                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12583                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12584                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12585                         break;
12586                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12587                         if (flow_dv_convert_action_set_reg
12588                                         (mhdr_res, actions, error))
12589                                 return -rte_errno;
12590                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12591                         break;
12592                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12593                         if (flow_dv_convert_action_copy_mreg
12594                                         (dev, mhdr_res, actions, error))
12595                                 return -rte_errno;
12596                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12597                         break;
12598                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12599                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12600                         dev_flow->handle->fate_action =
12601                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12602                         break;
12603                 case RTE_FLOW_ACTION_TYPE_METER:
12604                         if (!wks->fm)
12605                                 return rte_flow_error_set(error, rte_errno,
12606                                         RTE_FLOW_ERROR_TYPE_ACTION,
12607                                         NULL, "Failed to get meter in flow.");
12608                         /* Set the meter action. */
12609                         dev_flow->dv.actions[actions_n++] =
12610                                 wks->fm->meter_action;
12611                         action_flags |= MLX5_FLOW_ACTION_METER;
12612                         break;
12613                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12614                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12615                                                               actions, error))
12616                                 return -rte_errno;
12617                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12618                         break;
12619                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12620                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
12621                                                               actions, error))
12622                                 return -rte_errno;
12623                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
12624                         break;
12625                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
12626                         sample_act_pos = actions_n;
12627                         sample = (const struct rte_flow_action_sample *)
12628                                  action->conf;
12629                         actions_n++;
12630                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
12631                         /* Put encap action into group if working with port id. */
12632                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
12633                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
12634                                 sample_act->action_flags |=
12635                                                         MLX5_FLOW_ACTION_ENCAP;
12636                         break;
12637                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
12638                         if (flow_dv_convert_action_modify_field
12639                                         (dev, mhdr_res, actions, attr, error))
12640                                 return -rte_errno;
12641                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
12642                         break;
12643                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12644                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12645                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
12646                         if (!ct)
12647                                 return rte_flow_error_set(error, EINVAL,
12648                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12649                                                 NULL,
12650                                                 "Failed to get CT object.");
12651                         if (mlx5_aso_ct_available(priv->sh, ct))
12652                                 return rte_flow_error_set(error, rte_errno,
12653                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12654                                                 NULL,
12655                                                 "CT is unavailable.");
12656                         if (ct->is_original)
12657                                 dev_flow->dv.actions[actions_n] =
12658                                                         ct->dr_action_orig;
12659                         else
12660                                 dev_flow->dv.actions[actions_n] =
12661                                                         ct->dr_action_rply;
12662                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
12663                         flow->ct = owner_idx;
12664                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
12665                         actions_n++;
12666                         action_flags |= MLX5_FLOW_ACTION_CT;
12667                         break;
12668                 case RTE_FLOW_ACTION_TYPE_END:
12669                         actions_end = true;
12670                         if (mhdr_res->actions_num) {
12671                                 /* create modify action if needed. */
12672                                 if (flow_dv_modify_hdr_resource_register
12673                                         (dev, mhdr_res, dev_flow, error))
12674                                         return -rte_errno;
12675                                 dev_flow->dv.actions[modify_action_position] =
12676                                         handle->dvh.modify_hdr->action;
12677                         }
12678                         /*
12679                          * Handle AGE and COUNT action by single HW counter
12680                          * when they are not shared.
12681                          */
12682                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
12683                                 if ((non_shared_age &&
12684                                      count && !count->shared) ||
12685                                     !(priv->sh->flow_hit_aso_en &&
12686                                       (attr->group || attr->transfer))) {
12687                                         /* Creates age by counters. */
12688                                         cnt_act = flow_dv_prepare_counter
12689                                                                 (dev, dev_flow,
12690                                                                  flow, count,
12691                                                                  non_shared_age,
12692                                                                  error);
12693                                         if (!cnt_act)
12694                                                 return -rte_errno;
12695                                         dev_flow->dv.actions[age_act_pos] =
12696                                                                 cnt_act->action;
12697                                         break;
12698                                 }
12699                                 if (!flow->age && non_shared_age) {
12700                                         flow->age = flow_dv_aso_age_alloc
12701                                                                 (dev, error);
12702                                         if (!flow->age)
12703                                                 return -rte_errno;
12704                                         flow_dv_aso_age_params_init
12705                                                     (dev, flow->age,
12706                                                      non_shared_age->context ?
12707                                                      non_shared_age->context :
12708                                                      (void *)(uintptr_t)
12709                                                      (dev_flow->flow_idx),
12710                                                      non_shared_age->timeout);
12711                                 }
12712                                 age_act = flow_aso_age_get_by_idx(dev,
12713                                                                   flow->age);
12714                                 dev_flow->dv.actions[age_act_pos] =
12715                                                              age_act->dr_action;
12716                         }
12717                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
12718                                 /*
12719                                  * Create one count action, to be used
12720                                  * by all sub-flows.
12721                                  */
12722                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
12723                                                                   flow, count,
12724                                                                   NULL, error);
12725                                 if (!cnt_act)
12726                                         return -rte_errno;
12727                                 dev_flow->dv.actions[actions_n++] =
12728                                                                 cnt_act->action;
12729                         }
12730                 default:
12731                         break;
12732                 }
12733                 if (mhdr_res->actions_num &&
12734                     modify_action_position == UINT32_MAX)
12735                         modify_action_position = actions_n++;
12736         }
12737         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
12738                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
12739                 int item_type = items->type;
12740
12741                 if (!mlx5_flow_os_item_supported(item_type))
12742                         return rte_flow_error_set(error, ENOTSUP,
12743                                                   RTE_FLOW_ERROR_TYPE_ITEM,
12744                                                   NULL, "item not supported");
12745                 switch (item_type) {
12746                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
12747                         flow_dv_translate_item_port_id
12748                                 (dev, match_mask, match_value, items, attr);
12749                         last_item = MLX5_FLOW_ITEM_PORT_ID;
12750                         break;
12751                 case RTE_FLOW_ITEM_TYPE_ETH:
12752                         flow_dv_translate_item_eth(match_mask, match_value,
12753                                                    items, tunnel,
12754                                                    dev_flow->dv.group);
12755                         matcher.priority = action_flags &
12756                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
12757                                         !dev_flow->external ?
12758                                         MLX5_PRIORITY_MAP_L3 :
12759                                         MLX5_PRIORITY_MAP_L2;
12760                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
12761                                              MLX5_FLOW_LAYER_OUTER_L2;
12762                         break;
12763                 case RTE_FLOW_ITEM_TYPE_VLAN:
12764                         flow_dv_translate_item_vlan(dev_flow,
12765                                                     match_mask, match_value,
12766                                                     items, tunnel,
12767                                                     dev_flow->dv.group);
12768                         matcher.priority = MLX5_PRIORITY_MAP_L2;
12769                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
12770                                               MLX5_FLOW_LAYER_INNER_VLAN) :
12771                                              (MLX5_FLOW_LAYER_OUTER_L2 |
12772                                               MLX5_FLOW_LAYER_OUTER_VLAN);
12773                         break;
12774                 case RTE_FLOW_ITEM_TYPE_IPV4:
12775                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12776                                                   &item_flags, &tunnel);
12777                         flow_dv_translate_item_ipv4(match_mask, match_value,
12778                                                     items, tunnel,
12779                                                     dev_flow->dv.group);
12780                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12781                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
12782                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
12783                         if (items->mask != NULL &&
12784                             ((const struct rte_flow_item_ipv4 *)
12785                              items->mask)->hdr.next_proto_id) {
12786                                 next_protocol =
12787                                         ((const struct rte_flow_item_ipv4 *)
12788                                          (items->spec))->hdr.next_proto_id;
12789                                 next_protocol &=
12790                                         ((const struct rte_flow_item_ipv4 *)
12791                                          (items->mask))->hdr.next_proto_id;
12792                         } else {
12793                                 /* Reset for inner layer. */
12794                                 next_protocol = 0xff;
12795                         }
12796                         break;
12797                 case RTE_FLOW_ITEM_TYPE_IPV6:
12798                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12799                                                   &item_flags, &tunnel);
12800                         flow_dv_translate_item_ipv6(match_mask, match_value,
12801                                                     items, tunnel,
12802                                                     dev_flow->dv.group);
12803                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12804                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
12805                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
12806                         if (items->mask != NULL &&
12807                             ((const struct rte_flow_item_ipv6 *)
12808                              items->mask)->hdr.proto) {
12809                                 next_protocol =
12810                                         ((const struct rte_flow_item_ipv6 *)
12811                                          items->spec)->hdr.proto;
12812                                 next_protocol &=
12813                                         ((const struct rte_flow_item_ipv6 *)
12814                                          items->mask)->hdr.proto;
12815                         } else {
12816                                 /* Reset for inner layer. */
12817                                 next_protocol = 0xff;
12818                         }
12819                         break;
12820                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
12821                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
12822                                                              match_value,
12823                                                              items, tunnel);
12824                         last_item = tunnel ?
12825                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
12826                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
12827                         if (items->mask != NULL &&
12828                             ((const struct rte_flow_item_ipv6_frag_ext *)
12829                              items->mask)->hdr.next_header) {
12830                                 next_protocol =
12831                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12832                                  items->spec)->hdr.next_header;
12833                                 next_protocol &=
12834                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12835                                  items->mask)->hdr.next_header;
12836                         } else {
12837                                 /* Reset for inner layer. */
12838                                 next_protocol = 0xff;
12839                         }
12840                         break;
12841                 case RTE_FLOW_ITEM_TYPE_TCP:
12842                         flow_dv_translate_item_tcp(match_mask, match_value,
12843                                                    items, tunnel);
12844                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12845                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
12846                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
12847                         break;
12848                 case RTE_FLOW_ITEM_TYPE_UDP:
12849                         flow_dv_translate_item_udp(match_mask, match_value,
12850                                                    items, tunnel);
12851                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12852                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
12853                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
12854                         break;
12855                 case RTE_FLOW_ITEM_TYPE_GRE:
12856                         flow_dv_translate_item_gre(match_mask, match_value,
12857                                                    items, tunnel);
12858                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12859                         last_item = MLX5_FLOW_LAYER_GRE;
12860                         break;
12861                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
12862                         flow_dv_translate_item_gre_key(match_mask,
12863                                                        match_value, items);
12864                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
12865                         break;
12866                 case RTE_FLOW_ITEM_TYPE_NVGRE:
12867                         flow_dv_translate_item_nvgre(match_mask, match_value,
12868                                                      items, tunnel);
12869                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12870                         last_item = MLX5_FLOW_LAYER_GRE;
12871                         break;
12872                 case RTE_FLOW_ITEM_TYPE_VXLAN:
12873                         flow_dv_translate_item_vxlan(match_mask, match_value,
12874                                                      items, tunnel);
12875                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12876                         last_item = MLX5_FLOW_LAYER_VXLAN;
12877                         break;
12878                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
12879                         flow_dv_translate_item_vxlan_gpe(match_mask,
12880                                                          match_value, items,
12881                                                          tunnel);
12882                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12883                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
12884                         break;
12885                 case RTE_FLOW_ITEM_TYPE_GENEVE:
12886                         flow_dv_translate_item_geneve(match_mask, match_value,
12887                                                       items, tunnel);
12888                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12889                         last_item = MLX5_FLOW_LAYER_GENEVE;
12890                         break;
12891                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
12892                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
12893                                                           match_value,
12894                                                           items, error);
12895                         if (ret)
12896                                 return rte_flow_error_set(error, -ret,
12897                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12898                                         "cannot create GENEVE TLV option");
12899                         flow->geneve_tlv_option = 1;
12900                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
12901                         break;
12902                 case RTE_FLOW_ITEM_TYPE_MPLS:
12903                         flow_dv_translate_item_mpls(match_mask, match_value,
12904                                                     items, last_item, tunnel);
12905                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12906                         last_item = MLX5_FLOW_LAYER_MPLS;
12907                         break;
12908                 case RTE_FLOW_ITEM_TYPE_MARK:
12909                         flow_dv_translate_item_mark(dev, match_mask,
12910                                                     match_value, items);
12911                         last_item = MLX5_FLOW_ITEM_MARK;
12912                         break;
12913                 case RTE_FLOW_ITEM_TYPE_META:
12914                         flow_dv_translate_item_meta(dev, match_mask,
12915                                                     match_value, attr, items);
12916                         last_item = MLX5_FLOW_ITEM_METADATA;
12917                         break;
12918                 case RTE_FLOW_ITEM_TYPE_ICMP:
12919                         flow_dv_translate_item_icmp(match_mask, match_value,
12920                                                     items, tunnel);
12921                         last_item = MLX5_FLOW_LAYER_ICMP;
12922                         break;
12923                 case RTE_FLOW_ITEM_TYPE_ICMP6:
12924                         flow_dv_translate_item_icmp6(match_mask, match_value,
12925                                                       items, tunnel);
12926                         last_item = MLX5_FLOW_LAYER_ICMP6;
12927                         break;
12928                 case RTE_FLOW_ITEM_TYPE_TAG:
12929                         flow_dv_translate_item_tag(dev, match_mask,
12930                                                    match_value, items);
12931                         last_item = MLX5_FLOW_ITEM_TAG;
12932                         break;
12933                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
12934                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
12935                                                         match_value, items);
12936                         last_item = MLX5_FLOW_ITEM_TAG;
12937                         break;
12938                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
12939                         flow_dv_translate_item_tx_queue(dev, match_mask,
12940                                                         match_value,
12941                                                         items);
12942                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
12943                         break;
12944                 case RTE_FLOW_ITEM_TYPE_GTP:
12945                         flow_dv_translate_item_gtp(match_mask, match_value,
12946                                                    items, tunnel);
12947                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12948                         last_item = MLX5_FLOW_LAYER_GTP;
12949                         break;
12950                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
12951                         ret = flow_dv_translate_item_gtp_psc(match_mask,
12952                                                           match_value,
12953                                                           items);
12954                         if (ret)
12955                                 return rte_flow_error_set(error, -ret,
12956                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12957                                         "cannot create GTP PSC item");
12958                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
12959                         break;
12960                 case RTE_FLOW_ITEM_TYPE_ECPRI:
12961                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
12962                                 /* Create it only the first time to be used. */
12963                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12964                                 if (ret)
12965                                         return rte_flow_error_set
12966                                                 (error, -ret,
12967                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12968                                                 NULL,
12969                                                 "cannot create eCPRI parser");
12970                         }
12971                         /* Adjust the length matcher and device flow value. */
12972                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12973                         dev_flow->dv.value.size =
12974                                         MLX5_ST_SZ_BYTES(fte_match_param);
12975                         flow_dv_translate_item_ecpri(dev, match_mask,
12976                                                      match_value, items);
12977                         /* No other protocol should follow eCPRI layer. */
12978                         last_item = MLX5_FLOW_LAYER_ECPRI;
12979                         break;
12980                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
12981                         flow_dv_translate_item_integrity(match_mask,
12982                                                          match_value,
12983                                                          head_item, items);
12984                         break;
12985                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
12986                         flow_dv_translate_item_aso_ct(dev, match_mask,
12987                                                       match_value, items);
12988                         break;
12989                 default:
12990                         break;
12991                 }
12992                 item_flags |= last_item;
12993         }
12994         /*
12995          * When E-Switch mode is enabled, we have two cases where we need to
12996          * set the source port manually.
12997          * The first one is the case of a NIC steering rule, and the second is
12998          * an E-Switch rule where no port_id item was found. In both cases
12999          * the source port is set according to the current port in use.
13000          */
13001         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13002             (priv->representor || priv->master)) {
13003                 if (flow_dv_translate_item_port_id(dev, match_mask,
13004                                                    match_value, NULL, attr))
13005                         return -rte_errno;
13006         }
13007 #ifdef RTE_LIBRTE_MLX5_DEBUG
13008         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13009                                               dev_flow->dv.value.buf));
13010 #endif
13011         /*
13012          * Layers may be already initialized from prefix flow if this dev_flow
13013          * is the suffix flow.
13014          */
13015         handle->layers |= item_flags;
13016         if (action_flags & MLX5_FLOW_ACTION_RSS)
13017                 flow_dv_hashfields_set(dev_flow, rss_desc);
13018         /* If the sample action contains an RSS action, the Sample/Mirror
13019          * resource should be registered after the hash fields are updated.
13020          */
13021         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13022                 ret = flow_dv_translate_action_sample(dev,
13023                                                       sample,
13024                                                       dev_flow, attr,
13025                                                       &num_of_dest,
13026                                                       sample_actions,
13027                                                       &sample_res,
13028                                                       error);
13029                 if (ret < 0)
13030                         return ret;
13031                 ret = flow_dv_create_action_sample(dev,
13032                                                    dev_flow,
13033                                                    num_of_dest,
13034                                                    &sample_res,
13035                                                    &mdest_res,
13036                                                    sample_actions,
13037                                                    action_flags,
13038                                                    error);
13039                 if (ret < 0)
13040                         return rte_flow_error_set
13041                                                 (error, rte_errno,
13042                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13043                                                 NULL,
13044                                                 "cannot create sample action");
13045                 if (num_of_dest > 1) {
13046                         dev_flow->dv.actions[sample_act_pos] =
13047                         dev_flow->dv.dest_array_res->action;
13048                 } else {
13049                         dev_flow->dv.actions[sample_act_pos] =
13050                         dev_flow->dv.sample_res->verbs_action;
13051                 }
13052         }
13053         /*
13054          * For multiple destination (sample action with ratio=1), the encap
13055          * action and port id action will be combined into group action.
13056          * So need remove the original these actions in the flow and only
13057          * use the sample action instead of.
13058          */
13059         if (num_of_dest > 1 &&
13060             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13061                 int i;
13062                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13063
13064                 for (i = 0; i < actions_n; i++) {
13065                         if ((sample_act->dr_encap_action &&
13066                                 sample_act->dr_encap_action ==
13067                                 dev_flow->dv.actions[i]) ||
13068                                 (sample_act->dr_port_id_action &&
13069                                 sample_act->dr_port_id_action ==
13070                                 dev_flow->dv.actions[i]) ||
13071                                 (sample_act->dr_jump_action &&
13072                                 sample_act->dr_jump_action ==
13073                                 dev_flow->dv.actions[i]))
13074                                 continue;
13075                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13076                 }
13077                 memcpy((void *)dev_flow->dv.actions,
13078                                 (void *)temp_actions,
13079                                 tmp_actions_n * sizeof(void *));
13080                 actions_n = tmp_actions_n;
13081         }
13082         dev_flow->dv.actions_n = actions_n;
13083         dev_flow->act_flags = action_flags;
13084         if (wks->skip_matcher_reg)
13085                 return 0;
13086         /* Register matcher. */
13087         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13088                                     matcher.mask.size);
13089         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13090                                         matcher.priority);
13091         /* reserved field no needs to be set to 0 here. */
13092         tbl_key.is_fdb = attr->transfer;
13093         tbl_key.is_egress = attr->egress;
13094         tbl_key.level = dev_flow->dv.group;
13095         tbl_key.id = dev_flow->dv.table_id;
13096         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13097                                      tunnel, attr->group, error))
13098                 return -rte_errno;
13099         return 0;
13100 }
13101
13102 /**
13103  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13104  * and tunnel.
13105  *
13106  * @param[in, out] action
13107  *   Shred RSS action holding hash RX queue objects.
13108  * @param[in] hash_fields
13109  *   Defines combination of packet fields to participate in RX hash.
13110  * @param[in] tunnel
13111  *   Tunnel type
13112  * @param[in] hrxq_idx
13113  *   Hash RX queue index to set.
13114  *
13115  * @return
13116  *   0 on success, otherwise negative errno value.
13117  */
13118 static int
13119 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13120                               const uint64_t hash_fields,
13121                               uint32_t hrxq_idx)
13122 {
13123         uint32_t *hrxqs = action->hrxq;
13124
13125         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13126         case MLX5_RSS_HASH_IPV4:
13127                 /* fall-through. */
13128         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13129                 /* fall-through. */
13130         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13131                 hrxqs[0] = hrxq_idx;
13132                 return 0;
13133         case MLX5_RSS_HASH_IPV4_TCP:
13134                 /* fall-through. */
13135         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13136                 /* fall-through. */
13137         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13138                 hrxqs[1] = hrxq_idx;
13139                 return 0;
13140         case MLX5_RSS_HASH_IPV4_UDP:
13141                 /* fall-through. */
13142         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13143                 /* fall-through. */
13144         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13145                 hrxqs[2] = hrxq_idx;
13146                 return 0;
13147         case MLX5_RSS_HASH_IPV6:
13148                 /* fall-through. */
13149         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13150                 /* fall-through. */
13151         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13152                 hrxqs[3] = hrxq_idx;
13153                 return 0;
13154         case MLX5_RSS_HASH_IPV6_TCP:
13155                 /* fall-through. */
13156         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13157                 /* fall-through. */
13158         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13159                 hrxqs[4] = hrxq_idx;
13160                 return 0;
13161         case MLX5_RSS_HASH_IPV6_UDP:
13162                 /* fall-through. */
13163         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13164                 /* fall-through. */
13165         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13166                 hrxqs[5] = hrxq_idx;
13167                 return 0;
13168         case MLX5_RSS_HASH_NONE:
13169                 hrxqs[6] = hrxq_idx;
13170                 return 0;
13171         default:
13172                 return -1;
13173         }
13174 }
13175
13176 /**
13177  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13178  * and tunnel.
13179  *
13180  * @param[in] dev
13181  *   Pointer to the Ethernet device structure.
13182  * @param[in] idx
13183  *   Shared RSS action ID holding hash RX queue objects.
13184  * @param[in] hash_fields
13185  *   Defines combination of packet fields to participate in RX hash.
13186  * @param[in] tunnel
13187  *   Tunnel type
13188  *
13189  * @return
13190  *   Valid hash RX queue index, otherwise 0.
13191  */
13192 static uint32_t
13193 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13194                                  const uint64_t hash_fields)
13195 {
13196         struct mlx5_priv *priv = dev->data->dev_private;
13197         struct mlx5_shared_action_rss *shared_rss =
13198             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13199         const uint32_t *hrxqs = shared_rss->hrxq;
13200
13201         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13202         case MLX5_RSS_HASH_IPV4:
13203                 /* fall-through. */
13204         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13205                 /* fall-through. */
13206         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13207                 return hrxqs[0];
13208         case MLX5_RSS_HASH_IPV4_TCP:
13209                 /* fall-through. */
13210         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13211                 /* fall-through. */
13212         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13213                 return hrxqs[1];
13214         case MLX5_RSS_HASH_IPV4_UDP:
13215                 /* fall-through. */
13216         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13217                 /* fall-through. */
13218         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13219                 return hrxqs[2];
13220         case MLX5_RSS_HASH_IPV6:
13221                 /* fall-through. */
13222         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13223                 /* fall-through. */
13224         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13225                 return hrxqs[3];
13226         case MLX5_RSS_HASH_IPV6_TCP:
13227                 /* fall-through. */
13228         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13229                 /* fall-through. */
13230         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13231                 return hrxqs[4];
13232         case MLX5_RSS_HASH_IPV6_UDP:
13233                 /* fall-through. */
13234         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13235                 /* fall-through. */
13236         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13237                 return hrxqs[5];
13238         case MLX5_RSS_HASH_NONE:
13239                 return hrxqs[6];
13240         default:
13241                 return 0;
13242         }
13243
13244 }
13245
13246 /**
13247  * Apply the flow to the NIC, lock free,
13248  * (mutex should be acquired by caller).
13249  *
13250  * @param[in] dev
13251  *   Pointer to the Ethernet device structure.
13252  * @param[in, out] flow
13253  *   Pointer to flow structure.
13254  * @param[out] error
13255  *   Pointer to error structure.
13256  *
13257  * @return
13258  *   0 on success, a negative errno value otherwise and rte_errno is set.
13259  */
13260 static int
13261 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13262               struct rte_flow_error *error)
13263 {
13264         struct mlx5_flow_dv_workspace *dv;
13265         struct mlx5_flow_handle *dh;
13266         struct mlx5_flow_handle_dv *dv_h;
13267         struct mlx5_flow *dev_flow;
13268         struct mlx5_priv *priv = dev->data->dev_private;
13269         uint32_t handle_idx;
13270         int n;
13271         int err;
13272         int idx;
13273         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13274         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13275
13276         MLX5_ASSERT(wks);
13277         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13278                 dev_flow = &wks->flows[idx];
13279                 dv = &dev_flow->dv;
13280                 dh = dev_flow->handle;
13281                 dv_h = &dh->dvh;
13282                 n = dv->actions_n;
13283                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13284                         if (dv->transfer) {
13285                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13286                                 dv->actions[n++] = priv->sh->dr_drop_action;
13287                         } else {
13288 #ifdef HAVE_MLX5DV_DR
13289                                 /* DR supports drop action placeholder. */
13290                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13291                                 dv->actions[n++] = priv->sh->dr_drop_action;
13292 #else
13293                                 /* For DV we use the explicit drop queue. */
13294                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13295                                 dv->actions[n++] =
13296                                                 priv->drop_queue.hrxq->action;
13297 #endif
13298                         }
13299                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13300                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13301                         struct mlx5_hrxq *hrxq;
13302                         uint32_t hrxq_idx;
13303
13304                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13305                                                     &hrxq_idx);
13306                         if (!hrxq) {
13307                                 rte_flow_error_set
13308                                         (error, rte_errno,
13309                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13310                                          "cannot get hash queue");
13311                                 goto error;
13312                         }
13313                         dh->rix_hrxq = hrxq_idx;
13314                         dv->actions[n++] = hrxq->action;
13315                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13316                         struct mlx5_hrxq *hrxq = NULL;
13317                         uint32_t hrxq_idx;
13318
13319                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13320                                                 rss_desc->shared_rss,
13321                                                 dev_flow->hash_fields);
13322                         if (hrxq_idx)
13323                                 hrxq = mlx5_ipool_get
13324                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13325                                          hrxq_idx);
13326                         if (!hrxq) {
13327                                 rte_flow_error_set
13328                                         (error, rte_errno,
13329                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13330                                          "cannot get hash queue");
13331                                 goto error;
13332                         }
13333                         dh->rix_srss = rss_desc->shared_rss;
13334                         dv->actions[n++] = hrxq->action;
13335                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13336                         if (!priv->sh->default_miss_action) {
13337                                 rte_flow_error_set
13338                                         (error, rte_errno,
13339                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13340                                          "default miss action not be created.");
13341                                 goto error;
13342                         }
13343                         dv->actions[n++] = priv->sh->default_miss_action;
13344                 }
13345                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13346                                                (void *)&dv->value, n,
13347                                                dv->actions, &dh->drv_flow);
13348                 if (err) {
13349                         rte_flow_error_set(error, errno,
13350                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13351                                            NULL,
13352                                            "hardware refuses to create flow");
13353                         goto error;
13354                 }
13355                 if (priv->vmwa_context &&
13356                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13357                         /*
13358                          * The rule contains the VLAN pattern.
13359                          * For VF we are going to create VLAN
13360                          * interface to make hypervisor set correct
13361                          * e-Switch vport context.
13362                          */
13363                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13364                 }
13365         }
13366         return 0;
13367 error:
13368         err = rte_errno; /* Save rte_errno before cleanup. */
13369         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13370                        handle_idx, dh, next) {
13371                 /* hrxq is union, don't clear it if the flag is not set. */
13372                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13373                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13374                         dh->rix_hrxq = 0;
13375                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13376                         dh->rix_srss = 0;
13377                 }
13378                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13379                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13380         }
13381         rte_errno = err; /* Restore rte_errno. */
13382         return -rte_errno;
13383 }
13384
13385 void
13386 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
13387                           struct mlx5_cache_entry *entry)
13388 {
13389         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
13390                                                           entry);
13391
13392         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
13393         mlx5_free(cache);
13394 }
13395
13396 /**
13397  * Release the flow matcher.
13398  *
13399  * @param dev
13400  *   Pointer to Ethernet device.
13401  * @param port_id
13402  *   Index to port ID action resource.
13403  *
13404  * @return
13405  *   1 while a reference on it exists, 0 when freed.
13406  */
13407 static int
13408 flow_dv_matcher_release(struct rte_eth_dev *dev,
13409                         struct mlx5_flow_handle *handle)
13410 {
13411         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13412         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13413                                                             typeof(*tbl), tbl);
13414         int ret;
13415
13416         MLX5_ASSERT(matcher->matcher_object);
13417         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
13418         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13419         return ret;
13420 }
13421
13422 /**
13423  * Release encap_decap resource.
13424  *
13425  * @param list
13426  *   Pointer to the hash list.
13427  * @param entry
13428  *   Pointer to exist resource entry object.
13429  */
13430 void
13431 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
13432                               struct mlx5_hlist_entry *entry)
13433 {
13434         struct mlx5_dev_ctx_shared *sh = list->ctx;
13435         struct mlx5_flow_dv_encap_decap_resource *res =
13436                 container_of(entry, typeof(*res), entry);
13437
13438         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13439         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13440 }
13441
13442 /**
13443  * Release an encap/decap resource.
13444  *
13445  * @param dev
13446  *   Pointer to Ethernet device.
13447  * @param encap_decap_idx
13448  *   Index of encap decap resource.
13449  *
13450  * @return
13451  *   1 while a reference on it exists, 0 when freed.
13452  */
13453 static int
13454 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13455                                      uint32_t encap_decap_idx)
13456 {
13457         struct mlx5_priv *priv = dev->data->dev_private;
13458         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
13459
13460         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13461                                         encap_decap_idx);
13462         if (!cache_resource)
13463                 return 0;
13464         MLX5_ASSERT(cache_resource->action);
13465         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
13466                                      &cache_resource->entry);
13467 }
13468
13469 /**
13470  * Release an jump to table action resource.
13471  *
13472  * @param dev
13473  *   Pointer to Ethernet device.
13474  * @param rix_jump
13475  *   Index to the jump action resource.
13476  *
13477  * @return
13478  *   1 while a reference on it exists, 0 when freed.
13479  */
13480 static int
13481 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13482                                   uint32_t rix_jump)
13483 {
13484         struct mlx5_priv *priv = dev->data->dev_private;
13485         struct mlx5_flow_tbl_data_entry *tbl_data;
13486
13487         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13488                                   rix_jump);
13489         if (!tbl_data)
13490                 return 0;
13491         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13492 }
13493
13494 void
13495 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
13496                          struct mlx5_hlist_entry *entry)
13497 {
13498         struct mlx5_flow_dv_modify_hdr_resource *res =
13499                 container_of(entry, typeof(*res), entry);
13500
13501         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13502         mlx5_free(entry);
13503 }
13504
13505 /**
13506  * Release a modify-header resource.
13507  *
13508  * @param dev
13509  *   Pointer to Ethernet device.
13510  * @param handle
13511  *   Pointer to mlx5_flow_handle.
13512  *
13513  * @return
13514  *   1 while a reference on it exists, 0 when freed.
13515  */
13516 static int
13517 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13518                                     struct mlx5_flow_handle *handle)
13519 {
13520         struct mlx5_priv *priv = dev->data->dev_private;
13521         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13522
13523         MLX5_ASSERT(entry->action);
13524         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13525 }
13526
13527 void
13528 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
13529                           struct mlx5_cache_entry *entry)
13530 {
13531         struct mlx5_dev_ctx_shared *sh = list->ctx;
13532         struct mlx5_flow_dv_port_id_action_resource *cache =
13533                         container_of(entry, typeof(*cache), entry);
13534
13535         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13536         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
13537 }
13538
13539 /**
13540  * Release port ID action resource.
13541  *
13542  * @param dev
13543  *   Pointer to Ethernet device.
13544  * @param handle
13545  *   Pointer to mlx5_flow_handle.
13546  *
13547  * @return
13548  *   1 while a reference on it exists, 0 when freed.
13549  */
13550 static int
13551 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13552                                         uint32_t port_id)
13553 {
13554         struct mlx5_priv *priv = dev->data->dev_private;
13555         struct mlx5_flow_dv_port_id_action_resource *cache;
13556
13557         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13558         if (!cache)
13559                 return 0;
13560         MLX5_ASSERT(cache->action);
13561         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
13562                                      &cache->entry);
13563 }
13564
13565 /**
13566  * Release shared RSS action resource.
13567  *
13568  * @param dev
13569  *   Pointer to Ethernet device.
13570  * @param srss
13571  *   Shared RSS action index.
13572  */
13573 static void
13574 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13575 {
13576         struct mlx5_priv *priv = dev->data->dev_private;
13577         struct mlx5_shared_action_rss *shared_rss;
13578
13579         shared_rss = mlx5_ipool_get
13580                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13581         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13582 }
13583
13584 void
13585 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
13586                             struct mlx5_cache_entry *entry)
13587 {
13588         struct mlx5_dev_ctx_shared *sh = list->ctx;
13589         struct mlx5_flow_dv_push_vlan_action_resource *cache =
13590                         container_of(entry, typeof(*cache), entry);
13591
13592         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13593         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
13594 }
13595
13596 /**
13597  * Release push vlan action resource.
13598  *
13599  * @param dev
13600  *   Pointer to Ethernet device.
13601  * @param handle
13602  *   Pointer to mlx5_flow_handle.
13603  *
13604  * @return
13605  *   1 while a reference on it exists, 0 when freed.
13606  */
13607 static int
13608 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13609                                           struct mlx5_flow_handle *handle)
13610 {
13611         struct mlx5_priv *priv = dev->data->dev_private;
13612         struct mlx5_flow_dv_push_vlan_action_resource *cache;
13613         uint32_t idx = handle->dvh.rix_push_vlan;
13614
13615         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13616         if (!cache)
13617                 return 0;
13618         MLX5_ASSERT(cache->action);
13619         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
13620                                      &cache->entry);
13621 }
13622
13623 /**
13624  * Release the fate resource.
13625  *
13626  * @param dev
13627  *   Pointer to Ethernet device.
13628  * @param handle
13629  *   Pointer to mlx5_flow_handle.
13630  */
13631 static void
13632 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
13633                                struct mlx5_flow_handle *handle)
13634 {
13635         if (!handle->rix_fate)
13636                 return;
13637         switch (handle->fate_action) {
13638         case MLX5_FLOW_FATE_QUEUE:
13639                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
13640                         mlx5_hrxq_release(dev, handle->rix_hrxq);
13641                 break;
13642         case MLX5_FLOW_FATE_JUMP:
13643                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
13644                 break;
13645         case MLX5_FLOW_FATE_PORT_ID:
13646                 flow_dv_port_id_action_resource_release(dev,
13647                                 handle->rix_port_id_action);
13648                 break;
13649         default:
13650                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
13651                 break;
13652         }
13653         handle->rix_fate = 0;
13654 }
13655
13656 void
13657 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
13658                          struct mlx5_cache_entry *entry)
13659 {
13660         struct mlx5_flow_dv_sample_resource *cache_resource =
13661                         container_of(entry, typeof(*cache_resource), entry);
13662         struct rte_eth_dev *dev = cache_resource->dev;
13663         struct mlx5_priv *priv = dev->data->dev_private;
13664
13665         if (cache_resource->verbs_action)
13666                 claim_zero(mlx5_flow_os_destroy_flow_action
13667                                 (cache_resource->verbs_action));
13668         if (cache_resource->normal_path_tbl)
13669                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13670                         cache_resource->normal_path_tbl);
13671         flow_dv_sample_sub_actions_release(dev,
13672                                 &cache_resource->sample_idx);
13673         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13674                         cache_resource->idx);
13675         DRV_LOG(DEBUG, "sample resource %p: removed",
13676                 (void *)cache_resource);
13677 }
13678
13679 /**
13680  * Release an sample resource.
13681  *
13682  * @param dev
13683  *   Pointer to Ethernet device.
13684  * @param handle
13685  *   Pointer to mlx5_flow_handle.
13686  *
13687  * @return
13688  *   1 while a reference on it exists, 0 when freed.
13689  */
13690 static int
13691 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
13692                                      struct mlx5_flow_handle *handle)
13693 {
13694         struct mlx5_priv *priv = dev->data->dev_private;
13695         struct mlx5_flow_dv_sample_resource *cache_resource;
13696
13697         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13698                          handle->dvh.rix_sample);
13699         if (!cache_resource)
13700                 return 0;
13701         MLX5_ASSERT(cache_resource->verbs_action);
13702         return mlx5_cache_unregister(&priv->sh->sample_action_list,
13703                                      &cache_resource->entry);
13704 }
13705
13706 void
13707 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
13708                              struct mlx5_cache_entry *entry)
13709 {
13710         struct mlx5_flow_dv_dest_array_resource *cache_resource =
13711                         container_of(entry, typeof(*cache_resource), entry);
13712         struct rte_eth_dev *dev = cache_resource->dev;
13713         struct mlx5_priv *priv = dev->data->dev_private;
13714         uint32_t i = 0;
13715
13716         MLX5_ASSERT(cache_resource->action);
13717         if (cache_resource->action)
13718                 claim_zero(mlx5_flow_os_destroy_flow_action
13719                                         (cache_resource->action));
13720         for (; i < cache_resource->num_of_dest; i++)
13721                 flow_dv_sample_sub_actions_release(dev,
13722                                 &cache_resource->sample_idx[i]);
13723         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13724                         cache_resource->idx);
13725         DRV_LOG(DEBUG, "destination array resource %p: removed",
13726                 (void *)cache_resource);
13727 }
13728
13729 /**
13730  * Release an destination array resource.
13731  *
13732  * @param dev
13733  *   Pointer to Ethernet device.
13734  * @param handle
13735  *   Pointer to mlx5_flow_handle.
13736  *
13737  * @return
13738  *   1 while a reference on it exists, 0 when freed.
13739  */
13740 static int
13741 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
13742                                     struct mlx5_flow_handle *handle)
13743 {
13744         struct mlx5_priv *priv = dev->data->dev_private;
13745         struct mlx5_flow_dv_dest_array_resource *cache;
13746
13747         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13748                                handle->dvh.rix_dest_array);
13749         if (!cache)
13750                 return 0;
13751         MLX5_ASSERT(cache->action);
13752         return mlx5_cache_unregister(&priv->sh->dest_array_list,
13753                                      &cache->entry);
13754 }
13755
13756 static void
13757 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
13758 {
13759         struct mlx5_priv *priv = dev->data->dev_private;
13760         struct mlx5_dev_ctx_shared *sh = priv->sh;
13761         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
13762                                 sh->geneve_tlv_option_resource;
13763         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
13764         if (geneve_opt_resource) {
13765                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
13766                                          __ATOMIC_RELAXED))) {
13767                         claim_zero(mlx5_devx_cmd_destroy
13768                                         (geneve_opt_resource->obj));
13769                         mlx5_free(sh->geneve_tlv_option_resource);
13770                         sh->geneve_tlv_option_resource = NULL;
13771                 }
13772         }
13773         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
13774 }
13775
13776 /**
13777  * Remove the flow from the NIC but keeps it in memory.
13778  * Lock free, (mutex should be acquired by caller).
13779  *
13780  * @param[in] dev
13781  *   Pointer to Ethernet device.
13782  * @param[in, out] flow
13783  *   Pointer to flow structure.
13784  */
13785 static void
13786 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
13787 {
13788         struct mlx5_flow_handle *dh;
13789         uint32_t handle_idx;
13790         struct mlx5_priv *priv = dev->data->dev_private;
13791
13792         if (!flow)
13793                 return;
13794         handle_idx = flow->dev_handles;
13795         while (handle_idx) {
13796                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13797                                     handle_idx);
13798                 if (!dh)
13799                         return;
13800                 if (dh->drv_flow) {
13801                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
13802                         dh->drv_flow = NULL;
13803                 }
13804                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
13805                         flow_dv_fate_resource_release(dev, dh);
13806                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13807                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13808                 handle_idx = dh->next.next;
13809         }
13810 }
13811
/**
 * Remove the flow from the NIC and the memory.
 * Lock free, (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_info *fm = NULL;
	uint32_t srss = 0;

	if (!flow)
		return;
	/* Destroy the hardware rules first, then release the resources. */
	flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_free(dev, flow->counter);
		flow->counter = 0;
	}
	if (flow->meter) {
		fm = flow_dv_meter_find_by_idx(priv, flow->meter);
		if (fm)
			mlx5_flow_meter_detach(priv, fm);
		flow->meter = 0;
	}
	/* Keep the current age handling by default. */
	if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
		flow_dv_aso_ct_release(dev, flow->ct);
	else if (flow->age)
		flow_dv_aso_age_release(dev, flow->age);
	if (flow->geneve_tlv_option) {
		flow_dv_geneve_tlv_option_resource_release(dev);
		flow->geneve_tlv_option = 0;
	}
	/* Pop each device handle off the list and free its resources. */
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		dev_handle = mlx5_ipool_get(priv->sh->ipool
					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
		if (!dev_handle)
			return;
		flow->dev_handles = dev_handle->next.next;
		if (dev_handle->dvh.matcher)
			flow_dv_matcher_release(dev, dev_handle);
		if (dev_handle->dvh.rix_sample)
			flow_dv_sample_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_dest_array)
			flow_dv_dest_array_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_encap_decap)
			flow_dv_encap_decap_resource_release(dev,
				dev_handle->dvh.rix_encap_decap);
		if (dev_handle->dvh.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_push_vlan)
			flow_dv_push_vlan_action_resource_release(dev,
								  dev_handle);
		if (dev_handle->dvh.rix_tag)
			flow_dv_tag_release(dev,
					    dev_handle->dvh.rix_tag);
		if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
			flow_dv_fate_resource_release(dev, dev_handle);
		else if (!srss)
			/* Defer shared RSS release until all handles freed. */
			srss = dev_handle->rix_srss;
		if (fm && dev_handle->is_meter_flow_id &&
		    dev_handle->split_flow_id)
			mlx5_ipool_free(fm->flow_ipool,
					dev_handle->split_flow_id);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			   tmp_idx);
	}
	/* Release the shared RSS action once, after the handle loop. */
	if (srss)
		flow_dv_shared_rss_action_release(dev, srss);
}
13890
13891 /**
13892  * Release array of hash RX queue objects.
13893  * Helper function.
13894  *
13895  * @param[in] dev
13896  *   Pointer to the Ethernet device structure.
13897  * @param[in, out] hrxqs
13898  *   Array of hash RX queue objects.
13899  *
13900  * @return
13901  *   Total number of references to hash RX queue objects in *hrxqs* array
13902  *   after this operation.
13903  */
13904 static int
13905 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
13906                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
13907 {
13908         size_t i;
13909         int remaining = 0;
13910
13911         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
13912                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
13913
13914                 if (!ret)
13915                         (*hrxqs)[i] = 0;
13916                 remaining += ret;
13917         }
13918         return remaining;
13919 }
13920
/**
 * Release all hash RX queue objects representing shared RSS action.
 *
 * Thin wrapper delegating to __flow_dv_hrxqs_release() for the hrxq
 * array embedded in the shared RSS action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] shared_rss
 *   Shared RSS action to remove hash RX queue objects from.
 *
 * @return
 *   Total number of references to hash RX queue objects stored in
 *   *shared_rss* after this operation.
 *   Expected to be 0 if no external references held.
 */
static int
__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
				 struct mlx5_shared_action_rss *shared_rss)
{
	return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
}
13940
13941 /**
13942  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
13943  * user input.
13944  *
13945  * Only one hash value is available for one L3+L4 combination:
13946  * for example:
13947  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
13948  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
13949  * same slot in mlx5_rss_hash_fields.
13950  *
13951  * @param[in] rss
13952  *   Pointer to the shared action RSS conf.
13953  * @param[in, out] hash_field
13954  *   hash_field variable needed to be adjusted.
13955  *
13956  * @return
13957  *   void
13958  */
13959 static void
13960 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
13961                                      uint64_t *hash_field)
13962 {
13963         uint64_t rss_types = rss->origin.types;
13964
13965         switch (*hash_field & ~IBV_RX_HASH_INNER) {
13966         case MLX5_RSS_HASH_IPV4:
13967                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
13968                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
13969                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13970                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
13971                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13972                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
13973                         else
13974                                 *hash_field |= MLX5_RSS_HASH_IPV4;
13975                 }
13976                 return;
13977         case MLX5_RSS_HASH_IPV6:
13978                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13979                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13980                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13981                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13982                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13983                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
13984                         else
13985                                 *hash_field |= MLX5_RSS_HASH_IPV6;
13986                 }
13987                 return;
13988         case MLX5_RSS_HASH_IPV4_UDP:
13989                 /* fall-through. */
13990         case MLX5_RSS_HASH_IPV6_UDP:
13991                 if (rss_types & ETH_RSS_UDP) {
13992                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
13993                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13994                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
13995                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13996                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
13997                         else
13998                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
13999                 }
14000                 return;
14001         case MLX5_RSS_HASH_IPV4_TCP:
14002                 /* fall-through. */
14003         case MLX5_RSS_HASH_IPV6_TCP:
14004                 if (rss_types & ETH_RSS_TCP) {
14005                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14006                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14007                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14008                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14009                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14010                         else
14011                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14012                 }
14013                 return;
14014         default:
14015                 return;
14016         }
14017 }
14018
/**
 * Setup shared RSS action.
 * Prepare set of hash RX queue objects sufficient to handle all valid
 * hash_fields combinations (see enum ibv_rx_hash_fields).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action_idx
 *   Shared RSS action ipool index.
 * @param[in, out] shared_rss
 *   Partially initialized shared RSS action.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
			   uint32_t action_idx,
			   struct mlx5_shared_action_rss *shared_rss,
			   struct rte_flow_error *error)
{
	struct mlx5_flow_rss_desc rss_desc = { 0 };
	size_t i;
	int err;

	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot setup indirection table");
	}
	memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
	rss_desc.const_q = shared_rss->origin.queue;
	rss_desc.queue_num = shared_rss->origin.queue_num;
	/* Set non-zero value to indicate a shared RSS. */
	rss_desc.shared_rss = action_idx;
	rss_desc.ind_tbl = shared_rss->ind_tbl;
	/* Create one hrxq per supported hash-fields combination. */
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		uint32_t hrxq_idx;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel = 0;

		__flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
		/* Level > 1 means hashing on the inner packet headers. */
		if (shared_rss->origin.level > 1) {
			hash_fields |= IBV_RX_HASH_INNER;
			tunnel = 1;
		}
		rss_desc.tunnel = tunnel;
		rss_desc.hash_fields = hash_fields;
		hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
		if (!hrxq_idx) {
			rte_flow_error_set
				(error, rte_errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "cannot get hash queue");
			goto error_hrxq_new;
		}
		err = __flow_dv_action_rss_hrxq_set
			(shared_rss, hash_fields, hrxq_idx);
		MLX5_ASSERT(!err);
	}
	return 0;
error_hrxq_new:
	/* Save errno: the release calls below may overwrite it. */
	err = rte_errno;
	__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
		shared_rss->ind_tbl = NULL;
	rte_errno = err;
	return -rte_errno;
}
14092
/**
 * Create shared RSS action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Shared action configuration.
 * @param[in] rss
 *   RSS action specification used to create shared action.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   A valid shared action ID in case of success, 0 otherwise and
 *   rte_errno is set.
 */
static uint32_t
__flow_dv_action_rss_create(struct rte_eth_dev *dev,
			    const struct rte_flow_indir_action_conf *conf,
			    const struct rte_flow_action_rss *rss,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss = NULL;
	void *queue = NULL;
	struct rte_flow_action_rss *origin;
	const uint8_t *rss_key;
	uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
	uint32_t idx;

	RTE_SET_USED(conf);
	/* Private copy of the queue list, owned by the shared action. */
	queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	shared_rss = mlx5_ipool_zmalloc
			 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
	if (!shared_rss || !queue) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		goto error_rss_init;
	}
	/* The index must fit below the action-type bits of the handle. */
	if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "rss action number out of range");
		goto error_rss_init;
	}
	shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
					  sizeof(*shared_rss->ind_tbl),
					  0, SOCKET_ID_ANY);
	if (!shared_rss->ind_tbl) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		goto error_rss_init;
	}
	memcpy(queue, rss->queue, queue_size);
	shared_rss->ind_tbl->queues = queue;
	shared_rss->ind_tbl->queues_n = rss->queue_num;
	origin = &shared_rss->origin;
	origin->func = rss->func;
	origin->level = rss->level;
	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	origin->key = &shared_rss->key[0];
	origin->key_len = MLX5_RSS_HASH_KEY_LEN;
	origin->queue = queue;
	origin->queue_num = rss->queue_num;
	if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
		goto error_rss_init;
	rte_spinlock_init(&shared_rss->action_rss_sl);
	__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
	/* Publish the new action on the per-port list under the lock. */
	rte_spinlock_lock(&priv->shared_act_sl);
	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_rss, next);
	rte_spinlock_unlock(&priv->shared_act_sl);
	return idx;
error_rss_init:
	if (shared_rss) {
		if (shared_rss->ind_tbl)
			mlx5_free(shared_rss->ind_tbl);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
				idx);
	}
	if (queue)
		mlx5_free(queue);
	return 0;
}
14185
/**
 * Destroy the shared RSS action.
 * Release related hash RX queue objects.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   The shared RSS action object ID to be removed.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	uint32_t old_refcnt = 1;
	int remaining;
	uint16_t *queue = NULL;

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action");
	remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	if (remaining)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss hrxq has references");
	/* Only the creator reference (refcnt == 1) may destroy the action. */
	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
					 0, 0, __ATOMIC_ACQUIRE,
					 __ATOMIC_RELAXED))
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss has references");
	/* Keep the queue array pointer: it is freed separately below. */
	queue = shared_rss->ind_tbl->queues;
	remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
	if (remaining)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss indirection table has"
					  " references");
	mlx5_free(queue);
	/* Unlink from the per-port list under the lock, then free. */
	rte_spinlock_lock(&priv->shared_act_sl);
	ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_rss, next);
	rte_spinlock_unlock(&priv->shared_act_sl);
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			idx);
	return 0;
}
14246
14247 /**
14248  * Create indirect action, lock free,
14249  * (mutex should be acquired by caller).
14250  * Dispatcher for action type specific call.
14251  *
14252  * @param[in] dev
14253  *   Pointer to the Ethernet device structure.
14254  * @param[in] conf
14255  *   Shared action configuration.
14256  * @param[in] action
14257  *   Action specification used to create indirect action.
14258  * @param[out] error
14259  *   Perform verbose error reporting if not NULL. Initialized in case of
14260  *   error only.
14261  *
14262  * @return
14263  *   A valid shared action handle in case of success, NULL otherwise and
14264  *   rte_errno is set.
14265  */
14266 static struct rte_flow_action_handle *
14267 flow_dv_action_create(struct rte_eth_dev *dev,
14268                       const struct rte_flow_indir_action_conf *conf,
14269                       const struct rte_flow_action *action,
14270                       struct rte_flow_error *err)
14271 {
14272         struct mlx5_priv *priv = dev->data->dev_private;
14273         uint32_t age_idx = 0;
14274         uint32_t idx = 0;
14275         uint32_t ret = 0;
14276
14277         switch (action->type) {
14278         case RTE_FLOW_ACTION_TYPE_RSS:
14279                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14280                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14281                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14282                 break;
14283         case RTE_FLOW_ACTION_TYPE_AGE:
14284                 age_idx = flow_dv_aso_age_alloc(dev, err);
14285                 if (!age_idx) {
14286                         ret = -rte_errno;
14287                         break;
14288                 }
14289                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14290                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14291                 flow_dv_aso_age_params_init(dev, age_idx,
14292                                         ((const struct rte_flow_action_age *)
14293                                                 action->conf)->context ?
14294                                         ((const struct rte_flow_action_age *)
14295                                                 action->conf)->context :
14296                                         (void *)(uintptr_t)idx,
14297                                         ((const struct rte_flow_action_age *)
14298                                                 action->conf)->timeout);
14299                 ret = age_idx;
14300                 break;
14301         case RTE_FLOW_ACTION_TYPE_COUNT:
14302                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14303                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14304                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14305                 break;
14306         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14307                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14308                                                          err);
14309                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14310                 break;
14311         default:
14312                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14313                                    NULL, "action type not supported");
14314                 break;
14315         }
14316         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14317 }
14318
/**
 * Destroy the indirect action.
 * Release action related resources on the NIC and the memory.
 * Lock free, (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] handle
 *   The indirect action object handle to be removed.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
flow_dv_action_destroy(struct rte_eth_dev *dev,
		       struct rte_flow_action_handle *handle,
		       struct rte_flow_error *error)
{
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	/* The handle encodes action type and index - decode both. */
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	struct mlx5_flow_counter *cnt;
	uint32_t no_flow_refcnt = 1;
	int ret;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_RSS:
		return __flow_dv_action_rss_release(dev, idx, error);
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
		/* Only free when no flow still references the counter. */
		if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
						 &no_flow_refcnt, 1, false,
						 __ATOMIC_ACQUIRE,
						 __ATOMIC_RELAXED))
			return rte_flow_error_set(error, EBUSY,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Indirect count action has references");
		flow_dv_counter_free(dev, idx);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		ret = flow_dv_aso_age_release(dev, idx);
		if (ret)
			/*
			 * In this case, the last flow has a reference will
			 * actually release the age action.
			 */
			DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
				" released with references %d.", idx, ret);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		ret = flow_dv_aso_ct_release(dev, idx);
		if (ret < 0)
			return ret;
		if (ret > 0)
			DRV_LOG(DEBUG, "Connection tracking object %u still "
				"has references %d.", idx, ret);
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "action type not supported");
	}
}
14388
/**
 * Updates in place shared RSS action configuration.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   The shared RSS action object ID to be updated.
 * @param[in] action_conf
 *   RSS action specification used to modify *shared_rss*.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 * @note: currently only support update of RSS queues.
 */
static int
__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
			    const struct rte_flow_action_rss *action_conf,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	int ret = 0;
	void *queue = NULL;
	uint16_t *queue_old = NULL;
	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action to update");
	if (priv->obj_ops.ind_table_modify == NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot modify indirection table");
	/* Build the new queue array before taking the lock. */
	queue = mlx5_malloc(MLX5_MEM_ZERO,
			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	if (!queue)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	memcpy(queue, action_conf->queue, queue_size);
	MLX5_ASSERT(shared_rss->ind_tbl);
	rte_spinlock_lock(&shared_rss->action_rss_sl);
	queue_old = shared_rss->ind_tbl->queues;
	ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
					queue, action_conf->queue_num, true);
	if (ret) {
		/* Modification failed: drop the new copy, keep the old one. */
		mlx5_free(queue);
		ret = rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot update indirection table");
	} else {
		/* Swap succeeded: the old queue array is no longer used. */
		mlx5_free(queue_old);
		shared_rss->origin.queue = queue;
		shared_rss->origin.queue_num = action_conf->queue_num;
	}
	rte_spinlock_unlock(&shared_rss->action_rss_sl);
	return ret;
}
14454
/*
 * Updates in place conntrack context or direction.
 * Context update should be synchronized.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   The conntrack object ID to be updated.
 * @param[in] update
 *   Pointer to the structure of information to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
__flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
			   const struct rte_flow_modify_conntrack *update,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	const struct rte_flow_action_conntrack *new_prf;
	int ret = 0;
	/* The CT handle encodes the owning port ID - extract and verify it. */
	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
	uint32_t dev_idx;

	if (PORT_ID(priv) != owner)
		return rte_flow_error_set(error, EACCES,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "CT object owned by another port");
	dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
	ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
	if (!ct->refcnt)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "CT object is inactive");
	new_prf = &update->new_ct;
	if (update->direction)
		ct->is_original = !!new_prf->is_original_dir;
	if (update->state) {
		/* Only validate the profile when it needs to be updated. */
		ret = mlx5_validate_action_ct(dev, new_prf, error);
		if (ret)
			return ret;
		ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
		if (ret)
			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to send CT context update WQE");
		/* Block until ready or a failure. */
		ret = mlx5_aso_ct_available(priv->sh, ct);
		if (ret)
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "Timeout to get the CT update");
	}
	return ret;
}
14520
14521 /**
14522  * Updates in place shared action configuration, lock free,
14523  * (mutex should be acquired by caller).
14524  *
14525  * @param[in] dev
14526  *   Pointer to the Ethernet device structure.
14527  * @param[in] handle
14528  *   The indirect action object handle to be updated.
14529  * @param[in] update
14530  *   Action specification used to modify the action pointed by *handle*.
14531  *   *update* could be of same type with the action pointed by the *handle*
14532  *   handle argument, or some other structures like a wrapper, depending on
14533  *   the indirect action type.
14534  * @param[out] error
14535  *   Perform verbose error reporting if not NULL. Initialized in case of
14536  *   error only.
14537  *
14538  * @return
14539  *   0 on success, otherwise negative errno value.
14540  */
14541 static int
14542 flow_dv_action_update(struct rte_eth_dev *dev,
14543                         struct rte_flow_action_handle *handle,
14544                         const void *update,
14545                         struct rte_flow_error *err)
14546 {
14547         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14548         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14549         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14550         const void *action_conf;
14551
14552         switch (type) {
14553         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14554                 action_conf = ((const struct rte_flow_action *)update)->conf;
14555                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14556         case MLX5_INDIRECT_ACTION_TYPE_CT:
14557                 return __flow_dv_action_ct_update(dev, idx, update, err);
14558         default:
14559                 return rte_flow_error_set(err, ENOTSUP,
14560                                           RTE_FLOW_ERROR_TYPE_ACTION,
14561                                           NULL,
14562                                           "action type update not supported");
14563         }
14564 }
14565
14566 /**
14567  * Destroy the meter sub policy table rules.
14568  * Lock free, (mutex should be acquired by caller).
14569  *
14570  * @param[in] dev
14571  *   Pointer to Ethernet device.
14572  * @param[in] sub_policy
14573  *   Pointer to meter sub policy table.
14574  */
14575 static void
14576 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
14577                              struct mlx5_flow_meter_sub_policy *sub_policy)
14578 {
14579         struct mlx5_flow_tbl_data_entry *tbl;
14580         int i;
14581
14582         for (i = 0; i < RTE_COLORS; i++) {
14583                 if (sub_policy->color_rule[i]) {
14584                         claim_zero(mlx5_flow_os_destroy_flow
14585                                 (sub_policy->color_rule[i]));
14586                         sub_policy->color_rule[i] = NULL;
14587                 }
14588                 if (sub_policy->color_matcher[i]) {
14589                         tbl = container_of(sub_policy->color_matcher[i]->tbl,
14590                                 typeof(*tbl), tbl);
14591                         mlx5_cache_unregister(&tbl->matchers,
14592                                       &sub_policy->color_matcher[i]->entry);
14593                         sub_policy->color_matcher[i] = NULL;
14594                 }
14595         }
14596         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14597                 if (sub_policy->rix_hrxq[i]) {
14598                         mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
14599                         sub_policy->rix_hrxq[i] = 0;
14600                 }
14601                 if (sub_policy->jump_tbl[i]) {
14602                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14603                         sub_policy->jump_tbl[i]);
14604                         sub_policy->jump_tbl[i] = NULL;
14605                 }
14606         }
14607         if (sub_policy->tbl_rsc) {
14608                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14609                         sub_policy->tbl_rsc);
14610                 sub_policy->tbl_rsc = NULL;
14611         }
14612 }
14613
14614 /**
14615  * Destroy policy rules, lock free,
14616  * (mutex should be acquired by caller).
14617  * Dispatcher for action type specific call.
14618  *
14619  * @param[in] dev
14620  *   Pointer to the Ethernet device structure.
14621  * @param[in] mtr_policy
14622  *   Meter policy struct.
14623  */
14624 static void
14625 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
14626                       struct mlx5_flow_meter_policy *mtr_policy)
14627 {
14628         uint32_t i, j;
14629         struct mlx5_flow_meter_sub_policy *sub_policy;
14630         uint16_t sub_policy_num;
14631
14632         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14633                 sub_policy_num = (mtr_policy->sub_policy_num >>
14634                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14635                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14636                 for (j = 0; j < sub_policy_num; j++) {
14637                         sub_policy = mtr_policy->sub_policys[i][j];
14638                         if (sub_policy)
14639                                 __flow_dv_destroy_sub_policy_rules
14640                                                 (dev, sub_policy);
14641                 }
14642         }
14643 }
14644
14645 /**
14646  * Destroy policy action, lock free,
14647  * (mutex should be acquired by caller).
14648  * Dispatcher for action type specific call.
14649  *
14650  * @param[in] dev
14651  *   Pointer to the Ethernet device structure.
14652  * @param[in] mtr_policy
14653  *   Meter policy struct.
14654  */
14655 static void
14656 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
14657                       struct mlx5_flow_meter_policy *mtr_policy)
14658 {
14659         struct rte_flow_action *rss_action;
14660         struct mlx5_flow_handle dev_handle;
14661         uint32_t i, j;
14662
14663         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14664                 if (mtr_policy->act_cnt[i].rix_mark) {
14665                         flow_dv_tag_release(dev,
14666                                 mtr_policy->act_cnt[i].rix_mark);
14667                         mtr_policy->act_cnt[i].rix_mark = 0;
14668                 }
14669                 if (mtr_policy->act_cnt[i].modify_hdr) {
14670                         dev_handle.dvh.modify_hdr =
14671                                 mtr_policy->act_cnt[i].modify_hdr;
14672                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
14673                 }
14674                 switch (mtr_policy->act_cnt[i].fate_action) {
14675                 case MLX5_FLOW_FATE_SHARED_RSS:
14676                         rss_action = mtr_policy->act_cnt[i].rss;
14677                         mlx5_free(rss_action);
14678                         break;
14679                 case MLX5_FLOW_FATE_PORT_ID:
14680                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
14681                                 flow_dv_port_id_action_resource_release(dev,
14682                                 mtr_policy->act_cnt[i].rix_port_id_action);
14683                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
14684                         }
14685                         break;
14686                 case MLX5_FLOW_FATE_DROP:
14687                 case MLX5_FLOW_FATE_JUMP:
14688                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14689                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
14690                                                 NULL;
14691                         break;
14692                 default:
14693                         /*Queue action do nothing*/
14694                         break;
14695                 }
14696         }
14697         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14698                 mtr_policy->dr_drop_action[j] = NULL;
14699 }
14700
14701 /**
14702  * Create policy action per domain, lock free,
14703  * (mutex should be acquired by caller).
14704  * Dispatcher for action type specific call.
14705  *
14706  * @param[in] dev
14707  *   Pointer to the Ethernet device structure.
14708  * @param[in] mtr_policy
14709  *   Meter policy struct.
14710  * @param[in] actions
14711  *   Action specification used to create meter actions.
14712  * @param[out] error
14713  *   Perform verbose error reporting if not NULL. Initialized in case of
14714  *   error only.
14715  *
14716  * @return
14717  *   0 on success, otherwise negative errno value.
14718  */
static int
__flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			const struct rte_flow_action *actions[RTE_COLORS],
			enum mlx5_meter_domain domain,
			struct rte_mtr_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_error flow_err;
	const struct rte_flow_action *act;
	uint64_t action_flags = 0;
	/* Scratch flow/handle passed to the shared DV helpers; only the
	 * fields they fill in (rix_tag, modify_hdr, ...) are consumed.
	 */
	struct mlx5_flow_handle dh;
	struct mlx5_flow dev_flow;
	struct mlx5_flow_dv_port_id_action_resource port_id_action;
	int i, ret;
	uint8_t egress, transfer;
	struct mlx5_meter_policy_action_container *act_cnt = NULL;
	/* On-stack buffer sized for a modify-header resource plus the
	 * maximum number of modification commands.
	 */
	union {
		struct mlx5_flow_dv_modify_hdr_resource res;
		uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
			    sizeof(struct mlx5_modification_cmd) *
			    (MLX5_MAX_MODIFY_NUM + 1)];
	} mhdr_dummy;

	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	memset(&dh, 0, sizeof(struct mlx5_flow_handle));
	memset(&dev_flow, 0, sizeof(struct mlx5_flow));
	memset(&port_id_action, 0,
		sizeof(struct mlx5_flow_dv_port_id_action_resource));
	dev_flow.handle = &dh;
	dev_flow.dv.port_id_action = &port_id_action;
	dev_flow.external = true;
	/* Walk the action list of each color and translate each action
	 * into the per-color container of the policy.
	 */
	for (i = 0; i < RTE_COLORS; i++) {
		if (i < MLX5_MTR_RTE_COLORS)
			act_cnt = &mtr_policy->act_cnt[i];
		for (act = actions[i];
			act && act->type != RTE_FLOW_ACTION_TYPE_END;
			act++) {
			switch (act->type) {
			case RTE_FLOW_ACTION_TYPE_MARK:
			{
				/* Register a TAG resource carrying the mark
				 * value and remember its index.
				 */
				uint32_t tag_be = mlx5_flow_mark_set
					(((const struct rte_flow_action_mark *)
					(act->conf))->id);

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "mark action for this color");
				dev_flow.handle->mark = 1;
				if (flow_dv_tag_resource_register(dev, tag_be,
						  &dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL,
					"cannot setup policy mark action");
				MLX5_ASSERT(dev_flow.dv.tag_resource);
				act_cnt->rix_mark =
					dev_flow.handle->dvh.rix_tag;
				action_flags |= MLX5_FLOW_ACTION_MARK;
				break;
			}
			case RTE_FLOW_ACTION_TYPE_SET_TAG:
			{
				/* Convert SET_TAG into a modify-header
				 * resource and register it.
				 */
				struct mlx5_flow_dv_modify_hdr_resource
					*mhdr_res = &mhdr_dummy.res;

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "set tag action for this color");
				memset(mhdr_res, 0, sizeof(*mhdr_res));
				/* Table type follows the domain. */
				mhdr_res->ft_type = transfer ?
					MLX5DV_FLOW_TABLE_TYPE_FDB :
					egress ?
					MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
					MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
				if (flow_dv_convert_action_set_tag
				(dev, mhdr_res,
				(const struct rte_flow_action_set_tag *)
				act->conf,  &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot convert policy "
					"set tag action");
				if (!mhdr_res->actions_num)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot find policy "
					"set tag action");
				/* create modify action if needed. */
				dev_flow.dv.group = 1;
				if (flow_dv_modify_hdr_resource_register
					(dev, mhdr_res, &dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot register policy "
					"set tag action");
				act_cnt->modify_hdr =
				dev_flow.handle->dvh.modify_hdr;
				action_flags |= MLX5_FLOW_ACTION_SET_TAG;
				break;
			}
			case RTE_FLOW_ACTION_TYPE_DROP:
			{
				struct mlx5_flow_mtr_mng *mtrmng =
						priv->sh->mtrmng;
				struct mlx5_flow_tbl_data_entry *tbl_data;

				/*
				 * Create the drop table with
				 * METER DROP level.
				 */
				if (!mtrmng->drop_tbl[domain]) {
					mtrmng->drop_tbl[domain] =
					flow_dv_tbl_resource_get(dev,
					MLX5_FLOW_TABLE_LEVEL_METER,
					egress, transfer, false, NULL, 0,
					0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
					if (!mtrmng->drop_tbl[domain])
						return -rte_mtr_error_set
					(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL,
					"Failed to create meter drop table");
				}
				tbl_data = container_of
				(mtrmng->drop_tbl[domain],
				struct mlx5_flow_tbl_data_entry, tbl);
				/* Drop is implemented as a jump into the
				 * shared drop table of the domain.
				 */
				if (i < MLX5_MTR_RTE_COLORS) {
					act_cnt->dr_jump_action[domain] =
						tbl_data->jump.action;
					act_cnt->fate_action =
						MLX5_FLOW_FATE_DROP;
				}
				/* RED has no act_cnt slot; keep its drop
				 * action on the policy itself.
				 */
				if (i == RTE_COLOR_RED)
					mtr_policy->dr_drop_action[domain] =
						tbl_data->jump.action;
				action_flags |= MLX5_FLOW_ACTION_DROP;
				break;
			}
			case RTE_FLOW_ACTION_TYPE_QUEUE:
			{
				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot create policy "
					"fate queue for this color");
				/* Only record the queue index here; the
				 * hrxq is resolved at translate stage.
				 */
				act_cnt->queue =
				((const struct rte_flow_action_queue *)
					(act->conf))->index;
				act_cnt->fate_action =
					MLX5_FLOW_FATE_QUEUE;
				dev_flow.handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
				mtr_policy->is_queue = 1;
				action_flags |= MLX5_FLOW_ACTION_QUEUE;
				break;
			}
			case RTE_FLOW_ACTION_TYPE_RSS:
			{
				int rss_size;

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "rss action for this color");
				/*
				 * Save RSS conf into policy struct
				 * for translate stage.
				 */
				/* First query the flattened size, then
				 * allocate and deep-copy the action.
				 */
				rss_size = (int)rte_flow_conv
					(RTE_FLOW_CONV_OP_ACTION,
					NULL, 0, act, &flow_err);
				if (rss_size <= 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Get the wrong "
					  "rss action struct size");
				act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
						rss_size, 0, SOCKET_ID_ANY);
				if (!act_cnt->rss)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "Fail to malloc rss action memory");
				ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
					act_cnt->rss, rss_size,
					act, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Fail to save "
					  "rss action into policy struct");
				act_cnt->fate_action =
					MLX5_FLOW_FATE_SHARED_RSS;
				action_flags |= MLX5_FLOW_ACTION_RSS;
				break;
			}
			case RTE_FLOW_ACTION_TYPE_PORT_ID:
			{
				struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
				uint32_t port_id = 0;

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot create policy "
					"port action for this color");
				memset(&port_id_resource, 0,
					sizeof(port_id_resource));
				if (flow_dv_translate_action_port_id(dev, act,
						&port_id, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot translate "
					"policy port action");
				port_id_resource.port_id = port_id;
				if (flow_dv_port_id_action_resource_register
					(dev, &port_id_resource,
					&dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot setup "
					"policy port action");
				act_cnt->rix_port_id_action =
					dev_flow.handle->rix_port_id_action;
				act_cnt->fate_action =
					MLX5_FLOW_FATE_PORT_ID;
				action_flags |= MLX5_FLOW_ACTION_PORT_ID;
				break;
			}
			case RTE_FLOW_ACTION_TYPE_JUMP:
			{
				uint32_t jump_group = 0;
				uint32_t table = 0;
				struct mlx5_flow_tbl_data_entry *tbl_data;
				struct flow_grp_info grp_info = {
					.external = !!dev_flow.external,
					.transfer = !!transfer,
					.fdb_def_rule = !!priv->fdb_def_rule,
					.std_tbl_fix = 0,
					.skip_scale = dev_flow.skip_scale &
					(1 << MLX5_SCALE_FLOW_GROUP_BIT),
				};
				/* NOTE(review): only sub-policy [0] gets the
				 * jump table here — presumably the jump fate
				 * uses a single sub-policy per domain; confirm
				 * against the translate stage.
				 */
				struct mlx5_flow_meter_sub_policy *sub_policy =
				mtr_policy->sub_policys[domain][0];

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "jump action for this color");
				jump_group =
				((const struct rte_flow_action_jump *)
							act->conf)->group;
				/* Map the user group to a hardware table id. */
				if (mlx5_flow_group_to_table(dev, NULL,
						       jump_group,
						       &table,
						       &grp_info, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot setup "
					"policy jump action");
				sub_policy->jump_tbl[i] =
				flow_dv_tbl_resource_get(dev,
					table, egress,
					transfer,
					!!dev_flow.external,
					NULL, jump_group, 0,
					0, &flow_err);
				if (!sub_policy->jump_tbl[i])
					return  -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot create jump action.");
				tbl_data = container_of
				(sub_policy->jump_tbl[i],
				struct mlx5_flow_tbl_data_entry, tbl);
				act_cnt->dr_jump_action[domain] =
					tbl_data->jump.action;
				act_cnt->fate_action =
					MLX5_FLOW_FATE_JUMP;
				action_flags |= MLX5_FLOW_ACTION_JUMP;
				break;
			}
			default:
				return -rte_mtr_error_set(error, ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "action type not supported");
			}
		}
	}
	return 0;
}
15040
15041 /**
15042  * Create the meter policy actions for all configured domains, lock free,
15043  * (mutex should be acquired by caller).
15044  * Dispatcher for action type specific call.
15045  *
15046  * @param[in] dev
15047  *   Pointer to the Ethernet device structure.
15048  * @param[in] mtr_policy
15049  *   Meter policy struct.
15050  * @param[in] actions
15051  *   Action specification used to create meter actions.
15052  * @param[out] error
15053  *   Perform verbose error reporting if not NULL. Initialized in case of
15054  *   error only.
15055  *
15056  * @return
15057  *   0 on success, otherwise negative errno value.
15058  */
15059 static int
15060 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15061                       struct mlx5_flow_meter_policy *mtr_policy,
15062                       const struct rte_flow_action *actions[RTE_COLORS],
15063                       struct rte_mtr_error *error)
15064 {
15065         int ret, i;
15066         uint16_t sub_policy_num;
15067
15068         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15069                 sub_policy_num = (mtr_policy->sub_policy_num >>
15070                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15071                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15072                 if (sub_policy_num) {
15073                         ret = __flow_dv_create_domain_policy_acts(dev,
15074                                 mtr_policy, actions,
15075                                 (enum mlx5_meter_domain)i, error);
15076                         if (ret)
15077                                 return ret;
15078                 }
15079         }
15080         return 0;
15081 }
15082
15083 /**
15084  * Query a DV flow rule for its statistics via DevX.
15085  *
15086  * @param[in] dev
15087  *   Pointer to Ethernet device.
15088  * @param[in] cnt_idx
15089  *   Index to the flow counter.
15090  * @param[out] data
15091  *   Data retrieved by the query.
15092  * @param[out] error
15093  *   Perform verbose error reporting if not NULL.
15094  *
15095  * @return
15096  *   0 on success, a negative errno value otherwise and rte_errno is set.
15097  */
15098 static int
15099 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15100                     struct rte_flow_error *error)
15101 {
15102         struct mlx5_priv *priv = dev->data->dev_private;
15103         struct rte_flow_query_count *qc = data;
15104
15105         if (!priv->config.devx)
15106                 return rte_flow_error_set(error, ENOTSUP,
15107                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15108                                           NULL,
15109                                           "counters are not supported");
15110         if (cnt_idx) {
15111                 uint64_t pkts, bytes;
15112                 struct mlx5_flow_counter *cnt;
15113                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15114
15115                 if (err)
15116                         return rte_flow_error_set(error, -err,
15117                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15118                                         NULL, "cannot read counters");
15119                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15120                 qc->hits_set = 1;
15121                 qc->bytes_set = 1;
15122                 qc->hits = pkts - cnt->hits;
15123                 qc->bytes = bytes - cnt->bytes;
15124                 if (qc->reset) {
15125                         cnt->hits = pkts;
15126                         cnt->bytes = bytes;
15127                 }
15128                 return 0;
15129         }
15130         return rte_flow_error_set(error, EINVAL,
15131                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15132                                   NULL,
15133                                   "counters are not available");
15134 }
15135
/**
 * Query an indirect action handle (age, count or conntrack).
 *
 * The handle value packs the action type in its high bits and the
 * object index in the low bits; the type selects the query path.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] handle
 *   Indirect action handle (type + index encoded as an integer).
 * @param[out] data
 *   Query result; its concrete type depends on the action type.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_action_query(struct rte_eth_dev *dev,
		     const struct rte_flow_action_handle *handle, void *data,
		     struct rte_flow_error *error)
{
	struct mlx5_age_param *age_param;
	struct rte_flow_query_age *resp;
	/* The handle is an encoded integer, not a real pointer. */
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	uint16_t owner;
	uint32_t dev_idx;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
		resp = data;
		resp->aged = __atomic_load_n(&age_param->state,
					      __ATOMIC_RELAXED) == AGE_TMOUT ?
									  1 : 0;
		/* "Seconds since last hit" is meaningless once aged out. */
		resp->sec_since_last_hit_valid = !resp->aged;
		if (resp->sec_since_last_hit_valid)
			resp->sec_since_last_hit = __atomic_load_n
			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		return flow_dv_query_count(dev, idx, data, error);
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		/* CT objects may be shared; only the owner port may query. */
		owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
		if (owner != PORT_ID(priv))
			return rte_flow_error_set(error, EACCES,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object owned by another port");
		dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
		ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
		MLX5_ASSERT(ct);
		if (!ct->refcnt)
			return rte_flow_error_set(error, EFAULT,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object is inactive");
		/* Fill the software-known fields before the HW query. */
		((struct rte_flow_action_conntrack *)data)->peer_port =
							ct->peer;
		((struct rte_flow_action_conntrack *)data)->is_original_dir =
							ct->is_original;
		if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to query CT context");
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "action type query not supported");
	}
}
15196
15197 /**
15198  * Query a flow rule AGE action for aging information.
15199  *
15200  * @param[in] dev
15201  *   Pointer to Ethernet device.
15202  * @param[in] flow
15203  *   Pointer to the sub flow.
15204  * @param[out] data
15205  *   data retrieved by the query.
15206  * @param[out] error
15207  *   Perform verbose error reporting if not NULL.
15208  *
15209  * @return
15210  *   0 on success, a negative errno value otherwise and rte_errno is set.
15211  */
15212 static int
15213 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15214                   void *data, struct rte_flow_error *error)
15215 {
15216         struct rte_flow_query_age *resp = data;
15217         struct mlx5_age_param *age_param;
15218
15219         if (flow->age) {
15220                 struct mlx5_aso_age_action *act =
15221                                      flow_aso_age_get_by_idx(dev, flow->age);
15222
15223                 age_param = &act->age_params;
15224         } else if (flow->counter) {
15225                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15226
15227                 if (!age_param || !age_param->timeout)
15228                         return rte_flow_error_set
15229                                         (error, EINVAL,
15230                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15231                                          NULL, "cannot read age data");
15232         } else {
15233                 return rte_flow_error_set(error, EINVAL,
15234                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15235                                           NULL, "age data not available");
15236         }
15237         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15238                                      AGE_TMOUT ? 1 : 0;
15239         resp->sec_since_last_hit_valid = !resp->aged;
15240         if (resp->sec_since_last_hit_valid)
15241                 resp->sec_since_last_hit = __atomic_load_n
15242                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15243         return 0;
15244 }
15245
15246 /**
15247  * Query a flow.
15248  *
15249  * @see rte_flow_query()
15250  * @see rte_flow_ops
15251  */
15252 static int
15253 flow_dv_query(struct rte_eth_dev *dev,
15254               struct rte_flow *flow __rte_unused,
15255               const struct rte_flow_action *actions __rte_unused,
15256               void *data __rte_unused,
15257               struct rte_flow_error *error __rte_unused)
15258 {
15259         int ret = -EINVAL;
15260
15261         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15262                 switch (actions->type) {
15263                 case RTE_FLOW_ACTION_TYPE_VOID:
15264                         break;
15265                 case RTE_FLOW_ACTION_TYPE_COUNT:
15266                         ret = flow_dv_query_count(dev, flow->counter, data,
15267                                                   error);
15268                         break;
15269                 case RTE_FLOW_ACTION_TYPE_AGE:
15270                         ret = flow_dv_query_age(dev, flow, data, error);
15271                         break;
15272                 default:
15273                         return rte_flow_error_set(error, ENOTSUP,
15274                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15275                                                   actions,
15276                                                   "action not supported");
15277                 }
15278         }
15279         return ret;
15280 }
15281
15282 /**
15283  * Destroy the meter table set.
15284  * Lock free, (mutex should be acquired by caller).
15285  *
15286  * @param[in] dev
15287  *   Pointer to Ethernet device.
15288  * @param[in] fm
15289  *   Meter information table.
15290  */
15291 static void
15292 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15293                         struct mlx5_flow_meter_info *fm)
15294 {
15295         struct mlx5_priv *priv = dev->data->dev_private;
15296         int i;
15297
15298         if (!fm || !priv->config.dv_flow_en)
15299                 return;
15300         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15301                 if (fm->drop_rule[i]) {
15302                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15303                         fm->drop_rule[i] = NULL;
15304                 }
15305         }
15306 }
15307
15308 static void
15309 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15310 {
15311         struct mlx5_priv *priv = dev->data->dev_private;
15312         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15313         struct mlx5_flow_tbl_data_entry *tbl;
15314         int i, j;
15315
15316         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15317                 if (mtrmng->def_rule[i]) {
15318                         claim_zero(mlx5_flow_os_destroy_flow
15319                                         (mtrmng->def_rule[i]));
15320                         mtrmng->def_rule[i] = NULL;
15321                 }
15322                 if (mtrmng->def_matcher[i]) {
15323                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15324                                 struct mlx5_flow_tbl_data_entry, tbl);
15325                         mlx5_cache_unregister(&tbl->matchers,
15326                                       &mtrmng->def_matcher[i]->entry);
15327                         mtrmng->def_matcher[i] = NULL;
15328                 }
15329                 for (j = 0; j < MLX5_REG_BITS; j++) {
15330                         if (mtrmng->drop_matcher[i][j]) {
15331                                 tbl =
15332                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15333                                              struct mlx5_flow_tbl_data_entry,
15334                                              tbl);
15335                                 mlx5_cache_unregister(&tbl->matchers,
15336                                         &mtrmng->drop_matcher[i][j]->entry);
15337                                 mtrmng->drop_matcher[i][j] = NULL;
15338                         }
15339                 }
15340                 if (mtrmng->drop_tbl[i]) {
15341                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15342                                 mtrmng->drop_tbl[i]);
15343                         mtrmng->drop_tbl[i] = NULL;
15344                 }
15345         }
15346 }
15347
15348 /* Number of meter flow actions, count and jump or count and drop. */
15349 #define METER_ACTIONS 2
15350
15351 static void
15352 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15353                               enum mlx5_meter_domain domain)
15354 {
15355         struct mlx5_priv *priv = dev->data->dev_private;
15356         struct mlx5_flow_meter_def_policy *def_policy =
15357                         priv->sh->mtrmng->def_policy[domain];
15358
15359         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15360         mlx5_free(def_policy);
15361         priv->sh->mtrmng->def_policy[domain] = NULL;
15362 }
15363
15364 /**
15365  * Destroy the default policy table set.
15366  *
15367  * @param[in] dev
15368  *   Pointer to Ethernet device.
15369  */
15370 static void
15371 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15372 {
15373         struct mlx5_priv *priv = dev->data->dev_private;
15374         int i;
15375
15376         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15377                 if (priv->sh->mtrmng->def_policy[i])
15378                         __flow_dv_destroy_domain_def_policy(dev,
15379                                         (enum mlx5_meter_domain)i);
15380         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15381 }
15382
15383 static int
15384 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15385                         uint32_t color_reg_c_idx,
15386                         enum rte_color color, void *matcher_object,
15387                         int actions_n, void *actions,
15388                         bool match_src_port, void **rule,
15389                         const struct rte_flow_attr *attr)
15390 {
15391         int ret;
15392         struct mlx5_flow_dv_match_params value = {
15393                 .size = sizeof(value.buf) -
15394                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15395         };
15396         struct mlx5_flow_dv_match_params matcher = {
15397                 .size = sizeof(matcher.buf) -
15398                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15399         };
15400         struct mlx5_priv *priv = dev->data->dev_private;
15401
15402         if (match_src_port && (priv->representor || priv->master)) {
15403                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15404                                                    value.buf, NULL, attr)) {
15405                         DRV_LOG(ERR,
15406                         "Failed to create meter policy flow with port.");
15407                         return -1;
15408                 }
15409         }
15410         flow_dv_match_meta_reg(matcher.buf, value.buf,
15411                                 (enum modify_reg)color_reg_c_idx,
15412                                 rte_col_2_mlx5_col(color),
15413                                 UINT32_MAX);
15414         ret = mlx5_flow_os_create_flow(matcher_object,
15415                         (void *)&value, actions_n, actions, rule);
15416         if (ret) {
15417                 DRV_LOG(ERR, "Failed to create meter policy flow.");
15418                 return -1;
15419         }
15420         return 0;
15421 }
15422
/*
 * Register (or reuse from the table cache) a matcher for one policy
 * priority and store it in the sub-policy's color_matcher array.
 * Returns 0 on success, -1 on failure.
 */
static int
__flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			uint16_t priority,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			const struct rte_flow_attr *attr,
			bool match_src_port,
			struct rte_flow_error *error)
{
	struct mlx5_cache_entry *entry;
	struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf) -
				MLX5_ST_SZ_BYTES(fte_match_set_misc4),
		},
		.tbl = tbl_rsc,
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf) -
			MLX5_ST_SZ_BYTES(fte_match_set_misc4),
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = &matcher,
	};
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;

	/* On E-Switch devices optionally add source port to the mask. */
	if (match_src_port && (priv->representor || priv->master)) {
		if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
						   value.buf, NULL, attr)) {
			DRV_LOG(ERR,
			"Failed to register meter drop matcher with port.");
			return -1;
		}
	}
	tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
	/*
	 * Only non-RED priorities match on the color register; RED uses
	 * a catch-all matcher (no meta register criteria).
	 */
	if (priority < RTE_COLOR_RED)
		flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
			(enum modify_reg)color_reg_c_idx, 0, color_mask);
	matcher.priority = priority;
	/* CRC of the mask keys the matcher cache lookup. */
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
					matcher.mask.size);
	entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to register meter drop matcher.");
		return -1;
	}
	sub_policy->color_matcher[priority] =
		container_of(entry, struct mlx5_flow_dv_matcher, entry);
	return 0;
}
15477
15478 /**
15479  * Create the policy rules per domain.
15480  *
15481  * @param[in] dev
15482  *   Pointer to Ethernet device.
15483  * @param[in] sub_policy
15484  *    Pointer to sub policy table..
15485  * @param[in] egress
15486  *   Direction of the table.
15487  * @param[in] transfer
15488  *   E-Switch or NIC flow.
15489  * @param[in] acts
15490  *   Pointer to policy action list per color.
15491  *
15492  * @return
15493  *   0 on success, -1 otherwise.
15494  */
15495 static int
15496 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
15497                 struct mlx5_flow_meter_sub_policy *sub_policy,
15498                 uint8_t egress, uint8_t transfer, bool match_src_port,
15499                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
15500 {
15501         struct rte_flow_error flow_err;
15502         uint32_t color_reg_c_idx;
15503         struct rte_flow_attr attr = {
15504                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
15505                 .priority = 0,
15506                 .ingress = 0,
15507                 .egress = !!egress,
15508                 .transfer = !!transfer,
15509                 .reserved = 0,
15510         };
15511         int i;
15512         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
15513
15514         if (ret < 0)
15515                 return -1;
15516         /* Create policy table with POLICY level. */
15517         if (!sub_policy->tbl_rsc)
15518                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
15519                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
15520                                 egress, transfer, false, NULL, 0, 0,
15521                                 sub_policy->idx, &flow_err);
15522         if (!sub_policy->tbl_rsc) {
15523                 DRV_LOG(ERR,
15524                         "Failed to create meter sub policy table.");
15525                 return -1;
15526         }
15527         /* Prepare matchers. */
15528         color_reg_c_idx = ret;
15529         for (i = 0; i < RTE_COLORS; i++) {
15530                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
15531                         continue;
15532                 attr.priority = i;
15533                 if (!sub_policy->color_matcher[i]) {
15534                         /* Create matchers for Color. */
15535                         if (__flow_dv_create_policy_matcher(dev,
15536                                 color_reg_c_idx, i, sub_policy,
15537                                 &attr, match_src_port, &flow_err))
15538                                 return -1;
15539                 }
15540                 /* Create flow, matching color. */
15541                 if (acts[i].actions_n)
15542                         if (__flow_dv_create_policy_flow(dev,
15543                                 color_reg_c_idx, (enum rte_color)i,
15544                                 sub_policy->color_matcher[i]->matcher_object,
15545                                 acts[i].actions_n,
15546                                 acts[i].dv_actions,
15547                                 match_src_port,
15548                                 &sub_policy->color_rule[i],
15549                                 &attr))
15550                                 return -1;
15551         }
15552         return 0;
15553 }
15554
/*
 * Assemble the per-color DV action arrays from the meter policy
 * (mark tag, header modify and fate action) and create the policy
 * rules for one domain. Returns 0 on success, -1 on failure.
 */
static int
__flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	struct mlx5_flow_dv_tag_resource *tag;
	struct mlx5_flow_dv_port_id_action_resource *port_action;
	struct mlx5_hrxq *hrxq;
	uint8_t egress, transfer;
	bool match_src_port = false;
	int i;

	for (i = 0; i < RTE_COLORS; i++) {
		acts[i].actions_n = 0;
		/* Yellow color is not supported by this policy model. */
		if (i == RTE_COLOR_YELLOW)
			continue;
		if (i == RTE_COLOR_RED) {
			/* Only support drop on red. */
			acts[i].dv_actions[0] =
			mtr_policy->dr_drop_action[domain];
			acts[i].actions_n = 1;
			continue;
		}
		/* Optional MARK action, stored as a tag resource index. */
		if (mtr_policy->act_cnt[i].rix_mark) {
			tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
					mtr_policy->act_cnt[i].rix_mark);
			if (!tag) {
				DRV_LOG(ERR, "Failed to find "
				"mark action for policy.");
				return -1;
			}
			acts[i].dv_actions[acts[i].actions_n] =
						tag->action;
			acts[i].actions_n++;
		}
		/* Optional header-modify action (e.g. set meta). */
		if (mtr_policy->act_cnt[i].modify_hdr) {
			acts[i].dv_actions[acts[i].actions_n] =
			mtr_policy->act_cnt[i].modify_hdr->action;
			acts[i].actions_n++;
		}
		/* Fate action: where the packet finally goes. */
		if (mtr_policy->act_cnt[i].fate_action) {
			switch (mtr_policy->act_cnt[i].fate_action) {
			case MLX5_FLOW_FATE_PORT_ID:
				port_action = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
				mtr_policy->act_cnt[i].rix_port_id_action);
				if (!port_action) {
					DRV_LOG(ERR, "Failed to find "
						"port action for policy.");
					return -1;
				}
				acts[i].dv_actions[acts[i].actions_n] =
				port_action->action;
				acts[i].actions_n++;
				mtr_policy->dev = dev;
				/* Port fate implies source-port matching. */
				match_src_port = true;
				break;
			case MLX5_FLOW_FATE_DROP:
			case MLX5_FLOW_FATE_JUMP:
				acts[i].dv_actions[acts[i].actions_n] =
				mtr_policy->act_cnt[i].dr_jump_action[domain];
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_SHARED_RSS:
			case MLX5_FLOW_FATE_QUEUE:
				hrxq = mlx5_ipool_get
				(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				sub_policy->rix_hrxq[i]);
				if (!hrxq) {
					DRV_LOG(ERR, "Failed to find "
						"queue action for policy.");
					return -1;
				}
				acts[i].dv_actions[acts[i].actions_n] =
				hrxq->action;
				acts[i].actions_n++;
				break;
			default:
				/*Queue action do nothing*/
				break;
			}
		}
	}
	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
				egress, transfer, match_src_port, acts)) {
		DRV_LOG(ERR,
		"Failed to create policy rules per domain.");
		return -1;
	}
	return 0;
}
15651
15652 /**
15653  * Create the policy rules.
15654  *
15655  * @param[in] dev
15656  *   Pointer to Ethernet device.
15657  * @param[in,out] mtr_policy
15658  *   Pointer to meter policy table.
15659  *
15660  * @return
15661  *   0 on success, -1 otherwise.
15662  */
15663 static int
15664 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
15665                              struct mlx5_flow_meter_policy *mtr_policy)
15666 {
15667         int i;
15668         uint16_t sub_policy_num;
15669
15670         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15671                 sub_policy_num = (mtr_policy->sub_policy_num >>
15672                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15673                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15674                 if (!sub_policy_num)
15675                         continue;
15676                 /* Prepare actions list and create policy rules. */
15677                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
15678                         mtr_policy->sub_policys[i][0], i)) {
15679                         DRV_LOG(ERR,
15680                         "Failed to create policy action list per domain.");
15681                         return -1;
15682                 }
15683         }
15684         return 0;
15685 }
15686
/*
 * Lazily build the default meter policy for one domain: green jumps to
 * the meter suffix table, red jumps to the shared drop table. Any
 * failure tears down whatever was created via the common error path.
 * Returns 0 on success, -1 on failure.
 */
static int
__flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
	struct mlx5_flow_meter_def_policy *def_policy;
	struct mlx5_flow_tbl_resource *jump_tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	uint8_t egress, transfer;
	struct rte_flow_error error;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	int ret;

	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	def_policy = mtrmng->def_policy[domain];
	/* Already created for this domain - nothing to do. */
	if (!def_policy) {
		def_policy = mlx5_malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_flow_meter_def_policy),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!def_policy) {
			DRV_LOG(ERR, "Failed to alloc "
					"default policy table.");
			goto def_policy_error;
		}
		mtrmng->def_policy[domain] = def_policy;
		/* Create the meter suffix table with SUFFIX level. */
		jump_tbl = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
		if (!jump_tbl) {
			DRV_LOG(ERR,
				"Failed to create meter suffix table.");
			goto def_policy_error;
		}
		def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
		tbl_data = container_of(jump_tbl,
				struct mlx5_flow_tbl_data_entry, tbl);
		/* Green packets continue to the suffix table. */
		def_policy->dr_jump_action[RTE_COLOR_GREEN] =
						tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].dv_actions[0] =
						tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].actions_n = 1;
		/* Create jump action to the drop table. */
		if (!mtrmng->drop_tbl[domain]) {
			mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
				(dev, MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_DROP, &error);
			if (!mtrmng->drop_tbl[domain]) {
				DRV_LOG(ERR, "Failed to create "
				"meter drop table for default policy.");
				goto def_policy_error;
			}
		}
		tbl_data = container_of(mtrmng->drop_tbl[domain],
				struct mlx5_flow_tbl_data_entry, tbl);
		/* Red packets jump to the shared drop table. */
		def_policy->dr_jump_action[RTE_COLOR_RED] =
						tbl_data->jump.action;
		acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_RED].actions_n = 1;
		/* Create default policy rules. */
		ret = __flow_dv_create_domain_policy_rules(dev,
					&def_policy->sub_policy,
					egress, transfer, false, acts);
		if (ret) {
			DRV_LOG(ERR, "Failed to create "
				"default policy rules.");
				goto def_policy_error;
		}
	}
	return 0;
def_policy_error:
	__flow_dv_destroy_domain_def_policy(dev,
			(enum mlx5_meter_domain)domain);
	return -1;
}
15765
15766 /**
15767  * Create the default policy table set.
15768  *
15769  * @param[in] dev
15770  *   Pointer to Ethernet device.
15771  * @return
15772  *   0 on success, -1 otherwise.
15773  */
15774 static int
15775 flow_dv_create_def_policy(struct rte_eth_dev *dev)
15776 {
15777         struct mlx5_priv *priv = dev->data->dev_private;
15778         int i;
15779
15780         /* Non-termination policy table. */
15781         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15782                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
15783                         continue;
15784                 if (__flow_dv_create_domain_def_policy(dev, i)) {
15785                         DRV_LOG(ERR,
15786                         "Failed to create default policy");
15787                         return -1;
15788                 }
15789         }
15790         return 0;
15791 }
15792
15793 /**
15794  * Create the needed meter tables.
15795  * Lock free, (mutex should be acquired by caller).
15796  *
15797  * @param[in] dev
15798  *   Pointer to Ethernet device.
15799  * @param[in] fm
15800  *   Meter information table.
15801  * @param[in] mtr_idx
15802  *   Meter index.
15803  * @param[in] domain_bitmap
15804  *   Domain bitmap.
15805  * @return
15806  *   0 on success, -1 otherwise.
15807  */
15808 static int
15809 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
15810                         struct mlx5_flow_meter_info *fm,
15811                         uint32_t mtr_idx,
15812                         uint8_t domain_bitmap)
15813 {
15814         struct mlx5_priv *priv = dev->data->dev_private;
15815         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15816         struct rte_flow_error error;
15817         struct mlx5_flow_tbl_data_entry *tbl_data;
15818         uint8_t egress, transfer;
15819         void *actions[METER_ACTIONS];
15820         int domain, ret, i;
15821         struct mlx5_flow_counter *cnt;
15822         struct mlx5_flow_dv_match_params value = {
15823                 .size = sizeof(value.buf) -
15824                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15825         };
15826         struct mlx5_flow_dv_match_params matcher_para = {
15827                 .size = sizeof(matcher_para.buf) -
15828                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15829         };
15830         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
15831                                                      0, &error);
15832         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
15833         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
15834         struct mlx5_cache_entry *entry;
15835         struct mlx5_flow_dv_matcher matcher = {
15836                 .mask = {
15837                         .size = sizeof(matcher.mask.buf) -
15838                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15839                 },
15840         };
15841         struct mlx5_flow_dv_matcher *drop_matcher;
15842         struct mlx5_flow_cb_ctx ctx = {
15843                 .error = &error,
15844                 .data = &matcher,
15845         };
15846
15847         if (!priv->mtr_en || mtr_id_reg_c < 0) {
15848                 rte_errno = ENOTSUP;
15849                 return -1;
15850         }
15851         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
15852                 if (!(domain_bitmap & (1 << domain)) ||
15853                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
15854                         continue;
15855                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15856                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15857                 /* Create the drop table with METER DROP level. */
15858                 if (!mtrmng->drop_tbl[domain]) {
15859                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
15860                                         MLX5_FLOW_TABLE_LEVEL_METER,
15861                                         egress, transfer, false, NULL, 0,
15862                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
15863                         if (!mtrmng->drop_tbl[domain]) {
15864                                 DRV_LOG(ERR, "Failed to create meter drop table.");
15865                                 goto policy_error;
15866                         }
15867                 }
15868                 /* Create default matcher in drop table. */
15869                 matcher.tbl = mtrmng->drop_tbl[domain],
15870                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15871                                 struct mlx5_flow_tbl_data_entry, tbl);
15872                 if (!mtrmng->def_matcher[domain]) {
15873                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15874                                        (enum modify_reg)mtr_id_reg_c,
15875                                        0, 0);
15876                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
15877                         matcher.crc = rte_raw_cksum
15878                                         ((const void *)matcher.mask.buf,
15879                                         matcher.mask.size);
15880                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15881                         if (!entry) {
15882                                 DRV_LOG(ERR, "Failed to register meter "
15883                                 "drop default matcher.");
15884                                 goto policy_error;
15885                         }
15886                         mtrmng->def_matcher[domain] = container_of(entry,
15887                         struct mlx5_flow_dv_matcher, entry);
15888                 }
15889                 /* Create default rule in drop table. */
15890                 if (!mtrmng->def_rule[domain]) {
15891                         i = 0;
15892                         actions[i++] = priv->sh->dr_drop_action;
15893                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15894                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
15895                         ret = mlx5_flow_os_create_flow
15896                                 (mtrmng->def_matcher[domain]->matcher_object,
15897                                 (void *)&value, i, actions,
15898                                 &mtrmng->def_rule[domain]);
15899                         if (ret) {
15900                                 DRV_LOG(ERR, "Failed to create meter "
15901                                 "default drop rule for drop table.");
15902                                 goto policy_error;
15903                         }
15904                 }
15905                 if (!fm->drop_cnt)
15906                         continue;
15907                 MLX5_ASSERT(mtrmng->max_mtr_bits);
15908                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
15909                         /* Create matchers for Drop. */
15910                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15911                                         (enum modify_reg)mtr_id_reg_c, 0,
15912                                         (mtr_id_mask << mtr_id_offset));
15913                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
15914                         matcher.crc = rte_raw_cksum
15915                                         ((const void *)matcher.mask.buf,
15916                                         matcher.mask.size);
15917                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15918                         if (!entry) {
15919                                 DRV_LOG(ERR,
15920                                 "Failed to register meter drop matcher.");
15921                                 goto policy_error;
15922                         }
15923                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
15924                                 container_of(entry, struct mlx5_flow_dv_matcher,
15925                                              entry);
15926                 }
15927                 drop_matcher =
15928                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
15929                 /* Create drop rule, matching meter_id only. */
15930                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15931                                 (enum modify_reg)mtr_id_reg_c,
15932                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
15933                 i = 0;
15934                 cnt = flow_dv_counter_get_by_idx(dev,
15935                                         fm->drop_cnt, NULL);
15936                 actions[i++] = cnt->action;
15937                 actions[i++] = priv->sh->dr_drop_action;
15938                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
15939                                                (void *)&value, i, actions,
15940                                                &fm->drop_rule[domain]);
15941                 if (ret) {
15942                         DRV_LOG(ERR, "Failed to create meter "
15943                                 "drop rule for drop table.");
15944                                 goto policy_error;
15945                 }
15946         }
15947         return 0;
15948 policy_error:
15949         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15950                 if (fm->drop_rule[i]) {
15951                         claim_zero(mlx5_flow_os_destroy_flow
15952                                 (fm->drop_rule[i]));
15953                         fm->drop_rule[i] = NULL;
15954                 }
15955         }
15956         return -1;
15957 }
15958
/**
 * Find the policy table for prefix table with RSS.
 *
 * Looks up an existing sub policy whose per-color RX hash queues match
 * @p rss_desc; when none matches, a new sub policy is created (or the
 * first dummy one is reused) and its rules are installed.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Pointer to meter policy table.
 * @param[in] rss_desc
 *   Pointer to rss_desc
 * @return
 *   Pointer to table set on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_meter_sub_policy *
flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_policy *mtr_policy,
		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
	uint32_t sub_policy_idx = 0;
	uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
	uint32_t i, j;
	struct mlx5_hrxq *hrxq;
	struct mlx5_flow_handle dh;
	struct mlx5_meter_policy_action_container *act_cnt;
	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
	uint16_t sub_policy_num;

	rte_spinlock_lock(&mtr_policy->sl);
	/* Acquire one RX hash queue reference per requested color. */
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (!rss_desc[i])
			continue;
		hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
		if (!hrxq_idx[i]) {
			rte_spinlock_unlock(&mtr_policy->sl);
			return NULL;
		}
	}
	/* Sub policy count for this domain is packed into a bitfield. */
	sub_policy_num = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
	for (i = 0; i < sub_policy_num;
		i++) {
		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
			if (rss_desc[j] &&
				hrxq_idx[j] !=
			mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
				break;
		}
		if (j >= MLX5_MTR_RTE_COLORS) {
			/*
			 * Found the sub policy table with
			 * the same queue per color
			 */
			rte_spinlock_unlock(&mtr_policy->sl);
			/* Drop the extra references taken above. */
			for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
				mlx5_hrxq_release(dev, hrxq_idx[j]);
			return mtr_policy->sub_policys[domain][i];
		}
	}
	/* Create sub policy. */
	if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
		/* Reuse the first dummy sub_policy*/
		sub_policy = mtr_policy->sub_policys[domain][0];
		sub_policy_idx = sub_policy->idx;
	} else {
		sub_policy = mlx5_ipool_zmalloc
				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
				&sub_policy_idx);
		if (!sub_policy ||
			sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
			for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
				mlx5_hrxq_release(dev, hrxq_idx[i]);
			goto rss_sub_policy_error;
		}
		sub_policy->idx = sub_policy_idx;
		sub_policy->main_policy = mtr_policy;
	}
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (!rss_desc[i])
			continue;
		sub_policy->rix_hrxq[i] = hrxq_idx[i];
		/*
		 * Overwrite the last action from
		 * RSS action to Queue action.
		 */
		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      hrxq_idx[i]);
		if (!hrxq) {
			DRV_LOG(ERR, "Failed to create policy hrxq");
			goto rss_sub_policy_error;
		}
		act_cnt = &mtr_policy->act_cnt[i];
		if (act_cnt->rix_mark || act_cnt->modify_hdr) {
			/* Build a transient handle to update RXQ flags. */
			memset(&dh, 0, sizeof(struct mlx5_flow_handle));
			if (act_cnt->rix_mark)
				dh.mark = 1;
			dh.fate_action = MLX5_FLOW_FATE_QUEUE;
			dh.rix_hrxq = hrxq_idx[i];
			flow_drv_rxq_flags_set(dev, &dh);
		}
	}
	if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
		sub_policy, domain)) {
		DRV_LOG(ERR, "Failed to create policy "
			"rules per domain.");
		goto rss_sub_policy_error;
	}
	if (sub_policy != mtr_policy->sub_policys[domain][0]) {
		i = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
		/*
		 * NOTE(review): the array slot is written before the
		 * MLX5_MTR_RSS_MAX_SUB_POLICY bound is checked below —
		 * confirm the slot cannot already be out of range here.
		 */
		mtr_policy->sub_policys[domain][i] = sub_policy;
		i++;
		if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
			goto rss_sub_policy_error;
		mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
		mtr_policy->sub_policy_num |=
			(i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
	}
	rte_spinlock_unlock(&mtr_policy->sl);
	return sub_policy;
rss_sub_policy_error:
	if (sub_policy) {
		__flow_dv_destroy_sub_policy_rules(dev, sub_policy);
		if (sub_policy != mtr_policy->sub_policys[domain][0]) {
			i = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
			mtr_policy->sub_policys[domain][i] = NULL;
			mlx5_ipool_free
			(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
					sub_policy->idx);
		}
	}
	/*
	 * NOTE(review): when sub_policy was freshly allocated,
	 * sub_policy_idx == sub_policy->idx, so this appears to free the
	 * same ipool entry a second time after the branch above; and when
	 * the first dummy sub policy was reused, it frees the dummy's
	 * index. Verify against the ipool ownership rules — this looks
	 * like a double free / wrong free.
	 */
	if (sub_policy_idx)
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
			sub_policy_idx);
	rte_spinlock_unlock(&mtr_policy->sl);
	return NULL;
}
16102
16103
16104 /**
16105  * Destroy the sub policy table with RX queue.
16106  *
16107  * @param[in] dev
16108  *   Pointer to Ethernet device.
16109  * @param[in] mtr_policy
16110  *   Pointer to meter policy table.
16111  */
16112 static void
16113 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16114                 struct mlx5_flow_meter_policy *mtr_policy)
16115 {
16116         struct mlx5_priv *priv = dev->data->dev_private;
16117         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16118         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16119         uint32_t i, j;
16120         uint16_t sub_policy_num, new_policy_num;
16121
16122         rte_spinlock_lock(&mtr_policy->sl);
16123         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16124                 switch (mtr_policy->act_cnt[i].fate_action) {
16125                 case MLX5_FLOW_FATE_SHARED_RSS:
16126                         sub_policy_num = (mtr_policy->sub_policy_num >>
16127                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16128                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16129                         new_policy_num = sub_policy_num;
16130                         for (j = 0; j < sub_policy_num; j++) {
16131                                 sub_policy =
16132                                         mtr_policy->sub_policys[domain][j];
16133                                 if (sub_policy) {
16134                                         __flow_dv_destroy_sub_policy_rules(dev,
16135                                                 sub_policy);
16136                                 if (sub_policy !=
16137                                         mtr_policy->sub_policys[domain][0]) {
16138                                         mtr_policy->sub_policys[domain][j] =
16139                                                                 NULL;
16140                                         mlx5_ipool_free
16141                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16142                                                 sub_policy->idx);
16143                                                 new_policy_num--;
16144                                         }
16145                                 }
16146                         }
16147                         if (new_policy_num != sub_policy_num) {
16148                                 mtr_policy->sub_policy_num &=
16149                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16150                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16151                                 mtr_policy->sub_policy_num |=
16152                                 (new_policy_num &
16153                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16154                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16155                         }
16156                         break;
16157                 case MLX5_FLOW_FATE_QUEUE:
16158                         sub_policy = mtr_policy->sub_policys[domain][0];
16159                         __flow_dv_destroy_sub_policy_rules(dev,
16160                                                 sub_policy);
16161                         break;
16162                 default:
16163                         /*Other actions without queue and do nothing*/
16164                         break;
16165                 }
16166         }
16167         rte_spinlock_unlock(&mtr_policy->sl);
16168 }
16169
16170 /**
16171  * Validate the batch counter support in root table.
16172  *
16173  * Create a simple flow with invalid counter and drop action on root table to
16174  * validate if batch counter with offset on root table is supported or not.
16175  *
16176  * @param[in] dev
16177  *   Pointer to rte_eth_dev structure.
16178  *
16179  * @return
16180  *   0 on success, a negative errno value otherwise and rte_errno is set.
16181  */
16182 int
16183 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16184 {
16185         struct mlx5_priv *priv = dev->data->dev_private;
16186         struct mlx5_dev_ctx_shared *sh = priv->sh;
16187         struct mlx5_flow_dv_match_params mask = {
16188                 .size = sizeof(mask.buf),
16189         };
16190         struct mlx5_flow_dv_match_params value = {
16191                 .size = sizeof(value.buf),
16192         };
16193         struct mlx5dv_flow_matcher_attr dv_attr = {
16194                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16195                 .priority = 0,
16196                 .match_criteria_enable = 0,
16197                 .match_mask = (void *)&mask,
16198         };
16199         void *actions[2] = { 0 };
16200         struct mlx5_flow_tbl_resource *tbl = NULL;
16201         struct mlx5_devx_obj *dcs = NULL;
16202         void *matcher = NULL;
16203         void *flow = NULL;
16204         int ret = -1;
16205
16206         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16207                                         0, 0, 0, NULL);
16208         if (!tbl)
16209                 goto err;
16210         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16211         if (!dcs)
16212                 goto err;
16213         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16214                                                     &actions[0]);
16215         if (ret)
16216                 goto err;
16217         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
16218         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
16219                                                &matcher);
16220         if (ret)
16221                 goto err;
16222         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
16223                                        actions, &flow);
16224 err:
16225         /*
16226          * If batch counter with offset is not supported, the driver will not
16227          * validate the invalid offset value, flow create should success.
16228          * In this case, it means batch counter is not supported in root table.
16229          *
16230          * Otherwise, if flow create is failed, counter offset is supported.
16231          */
16232         if (flow) {
16233                 DRV_LOG(INFO, "Batch counter is not supported in root "
16234                               "table. Switch to fallback mode.");
16235                 rte_errno = ENOTSUP;
16236                 ret = -rte_errno;
16237                 claim_zero(mlx5_flow_os_destroy_flow(flow));
16238         } else {
16239                 /* Check matcher to make sure validate fail at flow create. */
16240                 if (!matcher || (matcher && errno != EINVAL))
16241                         DRV_LOG(ERR, "Unexpected error in counter offset "
16242                                      "support detection");
16243                 ret = 0;
16244         }
16245         if (actions[0])
16246                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
16247         if (matcher)
16248                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
16249         if (tbl)
16250                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
16251         if (dcs)
16252                 claim_zero(mlx5_devx_cmd_destroy(dcs));
16253         return ret;
16254 }
16255
16256 /**
16257  * Query a devx counter.
16258  *
16259  * @param[in] dev
16260  *   Pointer to the Ethernet device structure.
16261  * @param[in] cnt
16262  *   Index to the flow counter.
16263  * @param[in] clear
16264  *   Set to clear the counter statistics.
16265  * @param[out] pkts
16266  *   The statistics value of packets.
16267  * @param[out] bytes
16268  *   The statistics value of bytes.
16269  *
16270  * @return
16271  *   0 on success, otherwise return -1.
16272  */
16273 static int
16274 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
16275                       uint64_t *pkts, uint64_t *bytes)
16276 {
16277         struct mlx5_priv *priv = dev->data->dev_private;
16278         struct mlx5_flow_counter *cnt;
16279         uint64_t inn_pkts, inn_bytes;
16280         int ret;
16281
16282         if (!priv->config.devx)
16283                 return -1;
16284
16285         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
16286         if (ret)
16287                 return -1;
16288         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
16289         *pkts = inn_pkts - cnt->hits;
16290         *bytes = inn_bytes - cnt->bytes;
16291         if (clear) {
16292                 cnt->hits = inn_pkts;
16293                 cnt->bytes = inn_bytes;
16294         }
16295         return 0;
16296 }
16297
/**
 * Get aged-out flows.
 *
 * Walks both the ASO age-action list and the aged-counter list under the
 * age-info spinlock, collecting user contexts into @p context.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flows contexts.
 * @param[in] nb_contexts
 *   The length of context array pointers.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   how many contexts get in success, otherwise negative errno value.
 *   if nb_contexts is 0, return the amount of all aged contexts.
 *   if nb_contexts is not 0 , return the amount of aged flows reported
 *   in the context array.
 */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	struct mlx5_aso_age_action *act;
	int nb_flows = 0;

	/* A non-zero capacity requires a destination array. */
	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "empty context");
	age_info = GET_PORT_AGE_INFO(priv);
	rte_spinlock_lock(&age_info->aged_sl);
	/* First: aged ASO age actions. */
	LIST_FOREACH(act, &age_info->aged_aso, next) {
		nb_flows++;
		if (nb_contexts) {
			context[nb_flows - 1] =
						act->age_params.context;
			if (!(--nb_contexts))
				break;
		}
	}
	/* Second: aged counters. */
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	/* Re-arm age event reporting. */
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}
16359
/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 * The second argument (0) selects the default allocation mode of
 * flow_dv_counter_alloc() — see that function for its semantics.
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	return flow_dv_counter_alloc(dev, 0);
}
16368
16369 /**
16370  * Validate indirect action.
16371  * Dispatcher for action type specific validation.
16372  *
16373  * @param[in] dev
16374  *   Pointer to the Ethernet device structure.
16375  * @param[in] conf
16376  *   Indirect action configuration.
16377  * @param[in] action
16378  *   The indirect action object to validate.
16379  * @param[out] error
16380  *   Perform verbose error reporting if not NULL. Initialized in case of
16381  *   error only.
16382  *
16383  * @return
16384  *   0 on success, otherwise negative errno value.
16385  */
16386 static int
flow_dv_action_validate(struct rte_eth_dev *dev,
			const struct rte_flow_indir_action_conf *conf,
			const struct rte_flow_action *action,
			struct rte_flow_error *err)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* The indirect action configuration is not inspected here. */
	RTE_SET_USED(conf);
	/* Dispatch to the per-type validator for indirect actions. */
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_RSS:
		/*
		 * priv->obj_ops is set according to driver capabilities.
		 * When DevX capabilities are
		 * sufficient, it is set to devx_obj_ops.
		 * Otherwise, it is set to ibv_obj_ops.
		 * ibv_obj_ops doesn't support ind_table_modify operation.
		 * In this case the indirect RSS action can't be used.
		 */
		if (priv->obj_ops.ind_table_modify == NULL)
			return rte_flow_error_set
					(err, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "Indirect RSS action not supported");
		return mlx5_validate_action_rss(dev, action, err);
	case RTE_FLOW_ACTION_TYPE_AGE:
		/* Indirect AGE requires the ASO age management pool. */
		if (!priv->sh->aso_age_mng)
			return rte_flow_error_set(err, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"Indirect age action not supported");
		return flow_dv_validate_action_age(0, action, dev, err);
	case RTE_FLOW_ACTION_TYPE_COUNT:
		/*
		 * There are two mechanisms to share the action count.
		 * The old mechanism uses the shared field to share, while the
		 * new mechanism uses the indirect action API.
		 * This validation comes to make sure that the two mechanisms
		 * are not combined.
		 */
		if (is_shared_action_count(action))
			return rte_flow_error_set(err, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Mix shared and indirect counter is not supported");
		return flow_dv_validate_action_count(dev, true, 0, err);
	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
		/* Connection tracking needs ASO CT support in the HW/FW. */
		if (!priv->sh->ct_aso_en)
			return rte_flow_error_set(err, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"ASO CT is not supported");
		return mlx5_validate_action_ct(dev, action->conf, err);
	default:
		return rte_flow_error_set(err, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "action type not supported");
	}
}
16446
/**
 * Validate meter policy actions.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   Per-color arrays of meter policy actions to validate.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] is_rss
 *   Set to true if the policy contains an RSS action.
 * @param[out] domain_bitmap
 *   Bitmap of the steering domains the policy applies to.
 * @param[out] is_def_policy
 *   Set to true if the policy matches the default policy
 *   (green: no action, red: drop).
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
16464 static int
16465 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
16466                         const struct rte_flow_action *actions[RTE_COLORS],
16467                         struct rte_flow_attr *attr,
16468                         bool *is_rss,
16469                         uint8_t *domain_bitmap,
16470                         bool *is_def_policy,
16471                         struct rte_mtr_error *error)
16472 {
16473         struct mlx5_priv *priv = dev->data->dev_private;
16474         struct mlx5_dev_config *dev_conf = &priv->config;
16475         const struct rte_flow_action *act;
16476         uint64_t action_flags = 0;
16477         int actions_n;
16478         int i, ret;
16479         struct rte_flow_error flow_err;
16480         uint8_t domain_color[RTE_COLORS] = {0};
16481         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
16482
16483         if (!priv->config.dv_esw_en)
16484                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
16485         *domain_bitmap = def_domain;
16486         if (actions[RTE_COLOR_YELLOW] &&
16487                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
16488                 return -rte_mtr_error_set(error, ENOTSUP,
16489                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16490                                 NULL,
16491                                 "Yellow color does not support any action.");
16492         if (actions[RTE_COLOR_YELLOW] &&
16493                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_DROP)
16494                 return -rte_mtr_error_set(error, ENOTSUP,
16495                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16496                                 NULL, "Red color only supports drop action.");
16497         /*
16498          * Check default policy actions:
16499          * Green/Yellow: no action, Red: drop action
16500          */
16501         if ((!actions[RTE_COLOR_GREEN] ||
16502                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
16503                 *is_def_policy = true;
16504                 return 0;
16505         }
16506         flow_err.message = NULL;
16507         for (i = 0; i < RTE_COLORS; i++) {
16508                 act = actions[i];
16509                 for (action_flags = 0, actions_n = 0;
16510                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
16511                         act++) {
16512                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
16513                                 return -rte_mtr_error_set(error, ENOTSUP,
16514                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16515                                           NULL, "too many actions");
16516                         switch (act->type) {
16517                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
16518                                 if (!priv->config.dv_esw_en)
16519                                         return -rte_mtr_error_set(error,
16520                                         ENOTSUP,
16521                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16522                                         NULL, "PORT action validate check"
16523                                         " fail for ESW disable");
16524                                 ret = flow_dv_validate_action_port_id(dev,
16525                                                 action_flags,
16526                                                 act, attr, &flow_err);
16527                                 if (ret)
16528                                         return -rte_mtr_error_set(error,
16529                                         ENOTSUP,
16530                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16531                                         NULL, flow_err.message ?
16532                                         flow_err.message :
16533                                         "PORT action validate check fail");
16534                                 ++actions_n;
16535                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
16536                                 break;
16537                         case RTE_FLOW_ACTION_TYPE_MARK:
16538                                 ret = flow_dv_validate_action_mark(dev, act,
16539                                                            action_flags,
16540                                                            attr, &flow_err);
16541                                 if (ret < 0)
16542                                         return -rte_mtr_error_set(error,
16543                                         ENOTSUP,
16544                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16545                                         NULL, flow_err.message ?
16546                                         flow_err.message :
16547                                         "Mark action validate check fail");
16548                                 if (dev_conf->dv_xmeta_en !=
16549                                         MLX5_XMETA_MODE_LEGACY)
16550                                         return -rte_mtr_error_set(error,
16551                                         ENOTSUP,
16552                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16553                                         NULL, "Extend MARK action is "
16554                                         "not supported. Please try use "
16555                                         "default policy for meter.");
16556                                 action_flags |= MLX5_FLOW_ACTION_MARK;
16557                                 ++actions_n;
16558                                 break;
16559                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
16560                                 ret = flow_dv_validate_action_set_tag(dev,
16561                                                         act, action_flags,
16562                                                         attr, &flow_err);
16563                                 if (ret)
16564                                         return -rte_mtr_error_set(error,
16565                                         ENOTSUP,
16566                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16567                                         NULL, flow_err.message ?
16568                                         flow_err.message :
16569                                         "Set tag action validate check fail");
16570                                 /*
16571                                  * Count all modify-header actions
16572                                  * as one action.
16573                                  */
16574                                 if (!(action_flags &
16575                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
16576                                         ++actions_n;
16577                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
16578                                 break;
16579                         case RTE_FLOW_ACTION_TYPE_DROP:
16580                                 ret = mlx5_flow_validate_action_drop
16581                                         (action_flags,
16582                                         attr, &flow_err);
16583                                 if (ret < 0)
16584                                         return -rte_mtr_error_set(error,
16585                                         ENOTSUP,
16586                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16587                                         NULL, flow_err.message ?
16588                                         flow_err.message :
16589                                         "Drop action validate check fail");
16590                                 action_flags |= MLX5_FLOW_ACTION_DROP;
16591                                 ++actions_n;
16592                                 break;
16593                         case RTE_FLOW_ACTION_TYPE_QUEUE:
16594                                 /*
16595                                  * Check whether extensive
16596                                  * metadata feature is engaged.
16597                                  */
16598                                 if (dev_conf->dv_flow_en &&
16599                                         (dev_conf->dv_xmeta_en !=
16600                                         MLX5_XMETA_MODE_LEGACY) &&
16601                                         mlx5_flow_ext_mreg_supported(dev))
16602                                         return -rte_mtr_error_set(error,
16603                                           ENOTSUP,
16604                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16605                                           NULL, "Queue action with meta "
16606                                           "is not supported. Please try use "
16607                                           "default policy for meter.");
16608                                 ret = mlx5_flow_validate_action_queue(act,
16609                                                         action_flags, dev,
16610                                                         attr, &flow_err);
16611                                 if (ret < 0)
16612                                         return -rte_mtr_error_set(error,
16613                                           ENOTSUP,
16614                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16615                                           NULL, flow_err.message ?
16616                                           flow_err.message :
16617                                           "Queue action validate check fail");
16618                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
16619                                 ++actions_n;
16620                                 break;
16621                         case RTE_FLOW_ACTION_TYPE_RSS:
16622                                 if (dev_conf->dv_flow_en &&
16623                                         (dev_conf->dv_xmeta_en !=
16624                                         MLX5_XMETA_MODE_LEGACY) &&
16625                                         mlx5_flow_ext_mreg_supported(dev))
16626                                         return -rte_mtr_error_set(error,
16627                                           ENOTSUP,
16628                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16629                                           NULL, "RSS action with meta "
16630                                           "is not supported. Please try use "
16631                                           "default policy for meter.");
16632                                 ret = mlx5_validate_action_rss(dev, act,
16633                                                 &flow_err);
16634                                 if (ret < 0)
16635                                         return -rte_mtr_error_set(error,
16636                                           ENOTSUP,
16637                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16638                                           NULL, flow_err.message ?
16639                                           flow_err.message :
16640                                           "RSS action validate check fail");
16641                                 action_flags |= MLX5_FLOW_ACTION_RSS;
16642                                 ++actions_n;
16643                                 *is_rss = true;
16644                                 break;
16645                         case RTE_FLOW_ACTION_TYPE_JUMP:
16646                                 ret = flow_dv_validate_action_jump(dev,
16647                                         NULL, act, action_flags,
16648                                         attr, true, &flow_err);
16649                                 if (ret)
16650                                         return -rte_mtr_error_set(error,
16651                                           ENOTSUP,
16652                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16653                                           NULL, flow_err.message ?
16654                                           flow_err.message :
16655                                           "Jump action validate check fail");
16656                                 ++actions_n;
16657                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
16658                                 break;
16659                         default:
16660                                 return -rte_mtr_error_set(error, ENOTSUP,
16661                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16662                                         NULL,
16663                                         "Doesn't support optional action");
16664                         }
16665                 }
16666                 /* Yellow is not supported, just skip. */
16667                 if (i == RTE_COLOR_YELLOW)
16668                         continue;
16669                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
16670                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
16671                 else if ((action_flags &
16672                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
16673                         (action_flags & MLX5_FLOW_ACTION_MARK))
16674                         /*
16675                          * Only support MLX5_XMETA_MODE_LEGACY
16676                          * so MARK action only in ingress domain.
16677                          */
16678                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
16679                 else
16680                         domain_color[i] = def_domain;
16681                 /*
16682                  * Validate the drop action mutual exclusion
16683                  * with other actions. Drop action is mutually-exclusive
16684                  * with any other action, except for Count action.
16685                  */
16686                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
16687                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
16688                         return -rte_mtr_error_set(error, ENOTSUP,
16689                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16690                                 NULL, "Drop action is mutually-exclusive "
16691                                 "with any other action");
16692                 }
16693                 /* Eswitch has few restrictions on using items and actions */
16694                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
16695                         if (!mlx5_flow_ext_mreg_supported(dev) &&
16696                                 action_flags & MLX5_FLOW_ACTION_MARK)
16697                                 return -rte_mtr_error_set(error, ENOTSUP,
16698                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16699                                         NULL, "unsupported action MARK");
16700                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
16701                                 return -rte_mtr_error_set(error, ENOTSUP,
16702                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16703                                         NULL, "unsupported action QUEUE");
16704                         if (action_flags & MLX5_FLOW_ACTION_RSS)
16705                                 return -rte_mtr_error_set(error, ENOTSUP,
16706                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16707                                         NULL, "unsupported action RSS");
16708                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
16709                                 return -rte_mtr_error_set(error, ENOTSUP,
16710                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16711                                         NULL, "no fate action is found");
16712                 } else {
16713                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
16714                                 (domain_color[i] &
16715                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
16716                                 if ((domain_color[i] &
16717                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
16718                                         domain_color[i] =
16719                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
16720                                 else
16721                                         return -rte_mtr_error_set(error,
16722                                         ENOTSUP,
16723                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16724                                         NULL, "no fate action is found");
16725                         }
16726                 }
16727                 if (domain_color[i] != def_domain)
16728                         *domain_bitmap = domain_color[i];
16729         }
16730         return 0;
16731 }
16732
16733 static int
16734 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
16735 {
16736         struct mlx5_priv *priv = dev->data->dev_private;
16737         int ret = 0;
16738
16739         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
16740                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
16741                                                 flags);
16742                 if (ret != 0)
16743                         return ret;
16744         }
16745         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
16746                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
16747                 if (ret != 0)
16748                         return ret;
16749         }
16750         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
16751                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
16752                 if (ret != 0)
16753                         return ret;
16754         }
16755         return 0;
16756 }
16757
/* DV (Direct Verbs/Rules) flow engine callbacks registered with the
 * generic mlx5 flow layer.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	/* Flow rule lifecycle. */
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	/* Meter objects and meter policy handling. */
	.create_mtr_tbls = flow_dv_create_mtr_tbls,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
	.destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
	.create_meter = flow_dv_mtr_alloc,
	.free_meter = flow_dv_aso_mtr_release_to_pool,
	.validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
	.create_mtr_acts = flow_dv_create_mtr_policy_acts,
	.destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
	.create_policy_rules = flow_dv_create_policy_rules,
	.destroy_policy_rules = flow_dv_destroy_policy_rules,
	.create_def_policy = flow_dv_create_def_policy,
	.destroy_def_policy = flow_dv_destroy_def_policy,
	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
	.destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
	/* Counters and flow aging. */
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
	/* Indirect (shared) actions. */
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	/* Steering domain synchronization. */
	.sync_domain = flow_dv_sync_domain,
};
16791
16792 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
16793