net/mlx5: fix representor ID check for sampling
[dpdk.git] drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>
#include <rte_mtr.h>
#include <rte_mtr_driver.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than tunnel
 * mode. In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. The prefix
         * flow's layer flags must be used here, because the suffix flow
         * may not carry the user-defined items once the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
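
/*
 * Worked example for the helper above: a flow that is not split and
 * carries the pattern eth / ipv4 / udp / end leaves the loop with
 * attr->ipv4 = 1, attr->udp = 1 and attr->valid = 1; a suffix flow of
 * a split flow matching the same layers gets the identical result from
 * the handle->layers shortcut instead.
 */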

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static const struct rte_flow_item *
mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                default:
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                        return item;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                            item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
                                return item;
                        break;
                }
        }
        return NULL;
}

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
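
/*
 * Example: the 16-bit VLAN TCI is laid out as PCP(3) | DEI(1) | VID(12),
 * hence the shift of 13 above. Setting vlan_pcp = 5 ORs in
 * 5 << MLX5DV_FLOW_VLAN_PCP_SHIFT = 0xa000 after the PCP bits are
 * cleared, while setting the VID only rewrites the low 12 bits kept by
 * MLX5DV_FLOW_VLAN_VID_MASK.
 */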

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array and return it
 * as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   The converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
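
/*
 * Example: with data = {0x12, 0x34, 0x56} and size = 3 the helper above
 * returns 0x123456 on any host: the first two bytes are converted from
 * big-endian and the third byte is appended in the low 8 bits.
 */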

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should likewise be presented in big-endian format.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;
                bool next_field = true;
                bool next_dcopy = true;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
                                0 : size_b,
                };
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                        /*
                         * Destination field overflow. Copy leftovers of
                         * a source field to the next destination field.
                         */
                        if ((size_b > dcopy->size * CHAR_BIT) && dcopy->size) {
                                actions[i].length = dcopy->size * CHAR_BIT;
                                field->offset += dcopy->size;
                                next_field = false;
                        }
                        /*
                         * Not enough bits in a source field to fill a
                         * destination field. Switch to the next source.
                         */
                        if (dcopy->size > field->size &&
                            (size_b == field->size * CHAR_BIT)) {
                                actions[i].length = field->size * CHAR_BIT;
                                dcopy->offset += field->size * CHAR_BIT;
                                next_dcopy = false;
                        }
                        if (next_dcopy)
                                ++dcopy;
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (next_field)
                        ++field;
                ++i;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
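
/*
 * Worked example: converting a set of the UDP destination port uses the
 * modify_udp entry {2, 2, MLX5_MODI_OUT_UDP_DPORT}. The 2-byte mask
 * 0xffff is fetched as 0x0000ffff, so off_b = rte_bsf32(mask) = 0 and
 * size_b = 32 - 0 - __builtin_clz(mask) = 16, producing one command
 * that rewrites a 16-bit field at bit offset 0 of the destination port.
 */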

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX to Y X times: each addition of
                 * UINT32_MAX decrements Y by 1 (modulo 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
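
/*
 * Example of the wrap-around trick above: to decrement the sequence
 * number by 3, value becomes 3 * UINT32_MAX, which truncated to 32 bits
 * is 0xfffffffd, i.e. -3 modulo 2^32; the HW addition then subtracts 3.
 */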

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX to Y X times: each addition of
                 * UINT32_MAX decrements Y by 1 (modulo 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
                .offset = conf->offset,
                .length = conf->length,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there is
                         * no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
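
/*
 * Example: with dv_regc0_mask = 0xffff0000 only the upper half of
 * reg_c[0] is available for the MARK value, so shl_c0 =
 * rte_bsf32(0xffff0000) = 16 and both data and mask above are shifted
 * into bits 31:16 before the modify-header command is built.
 */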

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t mask = rte_cpu_to_be_32(conf->mask);
        uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bit offset in IPv6 is not byte aligned,
         * rdma-core only accepts the DSCP bits byte aligned at bits 0
         * to 5, to stay compatible with IPv4. There is thus no need to
         * shift the bits in the IPv6 case, as rdma-core requires a
         * byte-aligned value.
         */
1362         ipv6.hdr.vtc_flow = conf->dscp;
1363         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1364         item.spec = &ipv6;
1365         item.mask = &ipv6_mask;
1366         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1367                                              MLX5_MODIFICATION_TYPE_SET, error);
1368 }
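
/*
 * Worked example for the two DSCP converters above (editor's note; the
 * values follow from the rte_ip.h mask definitions):
 *   RTE_IPV4_HDR_DSCP_MASK >> 2  == 0xfc >> 2        == 0x3f
 *   RTE_IPV6_HDR_DSCP_MASK >> 22 == 0x0fc00000 >> 22 == 0x3f
 * Both shifts normalize the header mask to the same 6-bit value in bits
 * 0 to 5, the byte-aligned layout rdma-core expects for the DSCP modify.
 */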
1369
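/**
 * Return the bit width of a modify-field target field.
 *
 * @param[in] config
 *   Pointer to the device configuration (metadata mode matters for META).
 * @param[in] field
 *   Field identifier to query.
 *
 * @return
 *   Field width in bits, 0 if unknown or not applicable.
 */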
1370 static int
1371 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1372                            enum rte_flow_field_id field)
1373 {
1374         switch (field) {
1375         case RTE_FLOW_FIELD_START:
1376                 return 32;
1377         case RTE_FLOW_FIELD_MAC_DST:
1378         case RTE_FLOW_FIELD_MAC_SRC:
1379                 return 48;
1380         case RTE_FLOW_FIELD_VLAN_TYPE:
1381                 return 16;
1382         case RTE_FLOW_FIELD_VLAN_ID:
1383                 return 12;
1384         case RTE_FLOW_FIELD_MAC_TYPE:
1385                 return 16;
1386         case RTE_FLOW_FIELD_IPV4_DSCP:
1387                 return 6;
1388         case RTE_FLOW_FIELD_IPV4_TTL:
1389                 return 8;
1390         case RTE_FLOW_FIELD_IPV4_SRC:
1391         case RTE_FLOW_FIELD_IPV4_DST:
1392                 return 32;
1393         case RTE_FLOW_FIELD_IPV6_DSCP:
1394                 return 6;
1395         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1396                 return 8;
1397         case RTE_FLOW_FIELD_IPV6_SRC:
1398         case RTE_FLOW_FIELD_IPV6_DST:
1399                 return 128;
1400         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1401         case RTE_FLOW_FIELD_TCP_PORT_DST:
1402                 return 16;
1403         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1404         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1405                 return 32;
1406         case RTE_FLOW_FIELD_TCP_FLAGS:
1407                 return 9;
1408         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1409         case RTE_FLOW_FIELD_UDP_PORT_DST:
1410                 return 16;
1411         case RTE_FLOW_FIELD_VXLAN_VNI:
1412         case RTE_FLOW_FIELD_GENEVE_VNI:
1413                 return 24;
1414         case RTE_FLOW_FIELD_GTP_TEID:
1415         case RTE_FLOW_FIELD_TAG:
1416                 return 32;
1417         case RTE_FLOW_FIELD_MARK:
1418                 return 24;
1419         case RTE_FLOW_FIELD_META:
1420                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1421                         return 16;
1422                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1423                         return 32;
1424                 else
1425                         return 0;
1426         case RTE_FLOW_FIELD_POINTER:
1427         case RTE_FLOW_FIELD_VALUE:
1428                 return 64;
1429         default:
1430                 MLX5_ASSERT(false);
1431         }
1432         return 0;
1433 }
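
/*
 * Editor's illustrative sketch: the width returned above bounds the
 * "width" an application passes in struct rte_flow_action_modify_field.
 * A hypothetical helper querying the META width for the active mode:
 */
__rte_unused static int
example_meta_field_width(struct mlx5_dev_config *config)
{
        /* 16 with MLX5_XMETA_MODE_META16, 32 with META32, 0 otherwise. */
        return mlx5_flow_item_field_width(config, RTE_FLOW_FIELD_META);
}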
1434
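/**
 * Translate a modify-field id into hardware modification register info.
 *
 * Fills @p info with the matching MLX5_MODI_* register entries and, when
 * @p mask is given, the big-endian masks covering the requested width.
 * Multi-register fields (MAC addresses, IPv6 addresses) may consume
 * several entries.
 *
 * @param[in] data
 *   Pointer to the source or destination field description.
 * @param[out] info
 *   Array of register modification entries to fill.
 * @param[out] mask
 *   Array of big-endian masks to fill, NULL for a copy destination.
 * @param[out] value
 *   Array of immediate values, filled for VALUE/POINTER sources.
 * @param[in] width
 *   Number of bits to modify.
 * @param[in] dst_width
 *   Bit width of the destination field.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to the error structure.
 */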
1435 static void
1436 mlx5_flow_field_id_to_modify_info
1437                 (const struct rte_flow_action_modify_data *data,
1438                  struct field_modify_info *info,
1439                  uint32_t *mask, uint32_t *value,
1440                  uint32_t width, uint32_t dst_width,
1441                  struct rte_eth_dev *dev,
1442                  const struct rte_flow_attr *attr,
1443                  struct rte_flow_error *error)
1444 {
1445         struct mlx5_priv *priv = dev->data->dev_private;
1446         struct mlx5_dev_config *config = &priv->config;
1447         uint32_t idx = 0;
1448         uint32_t off = 0;
1449         uint64_t val = 0;
1450         switch (data->field) {
1451         case RTE_FLOW_FIELD_START:
1452                 /* not supported yet */
1453                 MLX5_ASSERT(false);
1454                 break;
1455         case RTE_FLOW_FIELD_MAC_DST:
1456                 off = data->offset > 16 ? data->offset - 16 : 0;
1457                 if (mask) {
1458                         if (data->offset < 16) {
1459                                 info[idx] = (struct field_modify_info){2, 0,
1460                                                 MLX5_MODI_OUT_DMAC_15_0};
1461                                 if (width < 16) {
1462                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1463                                                                  (16 - width));
1464                                         width = 0;
1465                                 } else {
1466                                         mask[idx] = RTE_BE16(0xffff);
1467                                         width -= 16;
1468                                 }
1469                                 if (!width)
1470                                         break;
1471                                 ++idx;
1472                         }
1473                         info[idx] = (struct field_modify_info){4, 4 * idx,
1474                                                 MLX5_MODI_OUT_DMAC_47_16};
1475                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1476                                                       (32 - width)) << off);
1477                 } else {
1478                         if (data->offset < 16)
1479                                 info[idx++] = (struct field_modify_info){2, 0,
1480                                                 MLX5_MODI_OUT_DMAC_15_0};
1481                         info[idx] = (struct field_modify_info){4, off,
1482                                                 MLX5_MODI_OUT_DMAC_47_16};
1483                 }
1484                 break;
1485         case RTE_FLOW_FIELD_MAC_SRC:
1486                 off = data->offset > 16 ? data->offset - 16 : 0;
1487                 if (mask) {
1488                         if (data->offset < 16) {
1489                                 info[idx] = (struct field_modify_info){2, 0,
1490                                                 MLX5_MODI_OUT_SMAC_15_0};
1491                                 if (width < 16) {
1492                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1493                                                                  (16 - width));
1494                                         width = 0;
1495                                 } else {
1496                                         mask[idx] = RTE_BE16(0xffff);
1497                                         width -= 16;
1498                                 }
1499                                 if (!width)
1500                                         break;
1501                                 ++idx;
1502                         }
1503                         info[idx] = (struct field_modify_info){4, 4 * idx,
1504                                                 MLX5_MODI_OUT_SMAC_47_16};
1505                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1506                                                       (32 - width)) << off);
1507                 } else {
1508                         if (data->offset < 16)
1509                                 info[idx++] = (struct field_modify_info){2, 0,
1510                                                 MLX5_MODI_OUT_SMAC_15_0};
1511                         info[idx] = (struct field_modify_info){4, off,
1512                                                 MLX5_MODI_OUT_SMAC_47_16};
1513                 }
1514                 break;
1515         case RTE_FLOW_FIELD_VLAN_TYPE:
1516                 /* not supported yet */
1517                 break;
1518         case RTE_FLOW_FIELD_VLAN_ID:
1519                 info[idx] = (struct field_modify_info){2, 0,
1520                                         MLX5_MODI_OUT_FIRST_VID};
1521                 if (mask)
1522                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1523                 break;
1524         case RTE_FLOW_FIELD_MAC_TYPE:
1525                 info[idx] = (struct field_modify_info){2, 0,
1526                                         MLX5_MODI_OUT_ETHERTYPE};
1527                 if (mask)
1528                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1529                 break;
1530         case RTE_FLOW_FIELD_IPV4_DSCP:
1531                 info[idx] = (struct field_modify_info){1, 0,
1532                                         MLX5_MODI_OUT_IP_DSCP};
1533                 if (mask)
1534                         mask[idx] = 0x3f >> (6 - width);
1535                 break;
1536         case RTE_FLOW_FIELD_IPV4_TTL:
1537                 info[idx] = (struct field_modify_info){1, 0,
1538                                         MLX5_MODI_OUT_IPV4_TTL};
1539                 if (mask)
1540                         mask[idx] = 0xff >> (8 - width);
1541                 break;
1542         case RTE_FLOW_FIELD_IPV4_SRC:
1543                 info[idx] = (struct field_modify_info){4, 0,
1544                                         MLX5_MODI_OUT_SIPV4};
1545                 if (mask)
1546                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1547                                                      (32 - width));
1548                 break;
1549         case RTE_FLOW_FIELD_IPV4_DST:
1550                 info[idx] = (struct field_modify_info){4, 0,
1551                                         MLX5_MODI_OUT_DIPV4};
1552                 if (mask)
1553                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1554                                                      (32 - width));
1555                 break;
1556         case RTE_FLOW_FIELD_IPV6_DSCP:
1557                 info[idx] = (struct field_modify_info){1, 0,
1558                                         MLX5_MODI_OUT_IP_DSCP};
1559                 if (mask)
1560                         mask[idx] = 0x3f >> (6 - width);
1561                 break;
1562         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1563                 info[idx] = (struct field_modify_info){1, 0,
1564                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1565                 if (mask)
1566                         mask[idx] = 0xff >> (8 - width);
1567                 break;
1568         case RTE_FLOW_FIELD_IPV6_SRC:
1569                 if (mask) {
1570                         if (data->offset < 32) {
1571                                 info[idx] = (struct field_modify_info){4,
1572                                                 4 * idx,
1573                                                 MLX5_MODI_OUT_SIPV6_31_0};
1574                                 if (width < 32) {
1575                                         mask[idx] =
1576                                                 rte_cpu_to_be_32(0xffffffff >>
1577                                                                  (32 - width));
1578                                         width = 0;
1579                                 } else {
1580                                         mask[idx] = RTE_BE32(0xffffffff);
1581                                         width -= 32;
1582                                 }
1583                                 if (!width)
1584                                         break;
1585                                 ++idx;
1586                         }
1587                         if (data->offset < 64) {
1588                                 info[idx] = (struct field_modify_info){4,
1589                                                 4 * idx,
1590                                                 MLX5_MODI_OUT_SIPV6_63_32};
1591                                 if (width < 32) {
1592                                         mask[idx] =
1593                                                 rte_cpu_to_be_32(0xffffffff >>
1594                                                                  (32 - width));
1595                                         width = 0;
1596                                 } else {
1597                                         mask[idx] = RTE_BE32(0xffffffff);
1598                                         width -= 32;
1599                                 }
1600                                 if (!width)
1601                                         break;
1602                                 ++idx;
1603                         }
1604                         if (data->offset < 96) {
1605                                 info[idx] = (struct field_modify_info){4,
1606                                                 4 * idx,
1607                                                 MLX5_MODI_OUT_SIPV6_95_64};
1608                                 if (width < 32) {
1609                                         mask[idx] =
1610                                                 rte_cpu_to_be_32(0xffffffff >>
1611                                                                  (32 - width));
1612                                         width = 0;
1613                                 } else {
1614                                         mask[idx] = RTE_BE32(0xffffffff);
1615                                         width -= 32;
1616                                 }
1617                                 if (!width)
1618                                         break;
1619                                 ++idx;
1620                         }
1621                         info[idx] = (struct field_modify_info){4, 4 * idx,
1622                                                 MLX5_MODI_OUT_SIPV6_127_96};
1623                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1624                                                      (32 - width));
1625                 } else {
1626                         if (data->offset < 32)
1627                                 info[idx++] = (struct field_modify_info){4, 0,
1628                                                 MLX5_MODI_OUT_SIPV6_31_0};
1629                         if (data->offset < 64)
1630                                 info[idx++] = (struct field_modify_info){4, 0,
1631                                                 MLX5_MODI_OUT_SIPV6_63_32};
1632                         if (data->offset < 96)
1633                                 info[idx++] = (struct field_modify_info){4, 0,
1634                                                 MLX5_MODI_OUT_SIPV6_95_64};
1635                         if (data->offset < 128)
1636                                 info[idx++] = (struct field_modify_info){4, 0,
1637                                                 MLX5_MODI_OUT_SIPV6_127_96};
1638                 }
1639                 break;
1640         case RTE_FLOW_FIELD_IPV6_DST:
1641                 if (mask) {
1642                         if (data->offset < 32) {
1643                                 info[idx] = (struct field_modify_info){4,
1644                                                 4 * idx,
1645                                                 MLX5_MODI_OUT_DIPV6_31_0};
1646                                 if (width < 32) {
1647                                         mask[idx] =
1648                                                 rte_cpu_to_be_32(0xffffffff >>
1649                                                                  (32 - width));
1650                                         width = 0;
1651                                 } else {
1652                                         mask[idx] = RTE_BE32(0xffffffff);
1653                                         width -= 32;
1654                                 }
1655                                 if (!width)
1656                                         break;
1657                                 ++idx;
1658                         }
1659                         if (data->offset < 64) {
1660                                 info[idx] = (struct field_modify_info){4,
1661                                                 4 * idx,
1662                                                 MLX5_MODI_OUT_DIPV6_63_32};
1663                                 if (width < 32) {
1664                                         mask[idx] =
1665                                                 rte_cpu_to_be_32(0xffffffff >>
1666                                                                  (32 - width));
1667                                         width = 0;
1668                                 } else {
1669                                         mask[idx] = RTE_BE32(0xffffffff);
1670                                         width -= 32;
1671                                 }
1672                                 if (!width)
1673                                         break;
1674                                 ++idx;
1675                         }
1676                         if (data->offset < 96) {
1677                                 info[idx] = (struct field_modify_info){4,
1678                                                 4 * idx,
1679                                                 MLX5_MODI_OUT_DIPV6_95_64};
1680                                 if (width < 32) {
1681                                         mask[idx] =
1682                                                 rte_cpu_to_be_32(0xffffffff >>
1683                                                                  (32 - width));
1684                                         width = 0;
1685                                 } else {
1686                                         mask[idx] = RTE_BE32(0xffffffff);
1687                                         width -= 32;
1688                                 }
1689                                 if (!width)
1690                                         break;
1691                                 ++idx;
1692                         }
1693                         info[idx] = (struct field_modify_info){4, 4 * idx,
1694                                                 MLX5_MODI_OUT_DIPV6_127_96};
1695                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1696                                                      (32 - width));
1697                 } else {
1698                         if (data->offset < 32)
1699                                 info[idx++] = (struct field_modify_info){4, 0,
1700                                                 MLX5_MODI_OUT_DIPV6_31_0};
1701                         if (data->offset < 64)
1702                                 info[idx++] = (struct field_modify_info){4, 0,
1703                                                 MLX5_MODI_OUT_DIPV6_63_32};
1704                         if (data->offset < 96)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_95_64};
1707                         if (data->offset < 128)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_127_96};
1710                 }
1711                 break;
1712         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1713                 info[idx] = (struct field_modify_info){2, 0,
1714                                         MLX5_MODI_OUT_TCP_SPORT};
1715                 if (mask)
1716                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1717                 break;
1718         case RTE_FLOW_FIELD_TCP_PORT_DST:
1719                 info[idx] = (struct field_modify_info){2, 0,
1720                                         MLX5_MODI_OUT_TCP_DPORT};
1721                 if (mask)
1722                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1723                 break;
1724         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1725                 info[idx] = (struct field_modify_info){4, 0,
1726                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1727                 if (mask)
1728                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1729                                                      (32 - width));
1730                 break;
1731         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1732                 info[idx] = (struct field_modify_info){4, 0,
1733                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1734                 if (mask)
1735                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1736                                                      (32 - width));
1737                 break;
1738         case RTE_FLOW_FIELD_TCP_FLAGS:
1739                 info[idx] = (struct field_modify_info){2, 0,
1740                                         MLX5_MODI_OUT_TCP_FLAGS};
1741                 if (mask)
1742                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1743                 break;
1744         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1745                 info[idx] = (struct field_modify_info){2, 0,
1746                                         MLX5_MODI_OUT_UDP_SPORT};
1747                 if (mask)
1748                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1749                 break;
1750         case RTE_FLOW_FIELD_UDP_PORT_DST:
1751                 info[idx] = (struct field_modify_info){2, 0,
1752                                         MLX5_MODI_OUT_UDP_DPORT};
1753                 if (mask)
1754                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1755                 break;
1756         case RTE_FLOW_FIELD_VXLAN_VNI:
1757                 /* not supported yet */
1758                 break;
1759         case RTE_FLOW_FIELD_GENEVE_VNI:
1760                 /* not supported yet */
1761                 break;
1762         case RTE_FLOW_FIELD_GTP_TEID:
1763                 info[idx] = (struct field_modify_info){4, 0,
1764                                         MLX5_MODI_GTP_TEID};
1765                 if (mask)
1766                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1767                                                      (32 - width));
1768                 break;
1769         case RTE_FLOW_FIELD_TAG:
1770                 {
1771                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1772                                                    data->level, error);
1773                         if (reg < 0)
1774                                 return;
1775                         MLX5_ASSERT(reg != REG_NON);
1776                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1777                         info[idx] = (struct field_modify_info){4, 0,
1778                                                 reg_to_field[reg]};
1779                         if (mask)
1780                                 mask[idx] =
1781                                         rte_cpu_to_be_32(0xffffffff >>
1782                                                          (32 - width));
1783                 }
1784                 break;
1785         case RTE_FLOW_FIELD_MARK:
1786                 {
1787                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1788                                                        0, error);
1789                         if (reg < 0)
1790                                 return;
1791                         MLX5_ASSERT(reg != REG_NON);
1792                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1793                         info[idx] = (struct field_modify_info){4, 0,
1794                                                 reg_to_field[reg]};
1795                         if (mask)
1796                                 mask[idx] =
1797                                         rte_cpu_to_be_32(0xffffffff >>
1798                                                          (32 - width));
1799                 }
1800                 break;
1801         case RTE_FLOW_FIELD_META:
1802                 {
1803                         unsigned int xmeta = config->dv_xmeta_en;
1804                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1805                         if (reg < 0)
1806                                 return;
1807                         MLX5_ASSERT(reg != REG_NON);
1808                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1809                         if (xmeta == MLX5_XMETA_MODE_META16) {
1810                                 info[idx] = (struct field_modify_info){2, 0,
1811                                                         reg_to_field[reg]};
1812                                 if (mask)
1813                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1814                                                                 (16 - width));
1815                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1816                                 info[idx] = (struct field_modify_info){4, 0,
1817                                                         reg_to_field[reg]};
1818                                 if (mask)
1819                                         mask[idx] =
1820                                                 rte_cpu_to_be_32(0xffffffff >>
1821                                                                 (32 - width));
1822                         } else {
1823                                 MLX5_ASSERT(false);
1824                         }
1825                 }
1826                 break;
1827         case RTE_FLOW_FIELD_POINTER:
1828         case RTE_FLOW_FIELD_VALUE:
1829                 if (data->field == RTE_FLOW_FIELD_POINTER)
1830                         memcpy(&val, (void *)(uintptr_t)data->value,
1831                                sizeof(uint64_t));
1832                 else
1833                         val = data->value;
1834                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1835                         if (mask[idx]) {
1836                                 if (dst_width == 48) {
1837                                         /* special case for MAC addresses */
1838                                         value[idx] = rte_cpu_to_be_16(val);
1839                                         val >>= 16;
1840                                         dst_width -= 16;
1841                                 } else if (dst_width > 16) {
1842                                         value[idx] = rte_cpu_to_be_32(val);
1843                                         val >>= 32;
1844                                 } else if (dst_width > 8) {
1845                                         value[idx] = rte_cpu_to_be_16(val);
1846                                         val >>= 16;
1847                                 } else {
1848                                         value[idx] = (uint8_t)val;
1849                                         val >>= 8;
1850                                 }
1851                                 if (!val)
1852                                         break;
1853                         }
1854                 }
1855                 break;
1856         default:
1857                 MLX5_ASSERT(false);
1858                 break;
1859         }
1860 }
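
/*
 * Worked example for the MAC_DST case above (editor's illustration):
 * offset 0 with width 48 emits two sub-fields,
 *   info[0] = {2, 0, MLX5_MODI_OUT_DMAC_15_0},  mask[0] = RTE_BE16(0xffff)
 *   info[1] = {4, 4, MLX5_MODI_OUT_DMAC_47_16}, mask[1] = RTE_BE32(0xffffffff)
 * while width 20 fills the 16-bit register completely and keeps only the
 * low four bits (0xf) of the DMAC_47_16 mask.
 */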
1861
1862 /**
1863  * Convert modify_field action to DV specification.
1864  *
1865  * @param[in] dev
1866  *   Pointer to the rte_eth_dev structure.
1867  * @param[in,out] resource
1868  *   Pointer to the modify-header resource.
1869  * @param[in] action
1870  *   Pointer to action specification.
1871  * @param[in] attr
1872  *   Attributes of flow that includes this item.
1873  * @param[out] error
1874  *   Pointer to the error structure.
1875  *
1876  * @return
1877  *   0 on success, a negative errno value otherwise and rte_errno is set.
1878  */
1879 static int
1880 flow_dv_convert_action_modify_field
1881                         (struct rte_eth_dev *dev,
1882                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1883                          const struct rte_flow_action *action,
1884                          const struct rte_flow_attr *attr,
1885                          struct rte_flow_error *error)
1886 {
1887         struct mlx5_priv *priv = dev->data->dev_private;
1888         struct mlx5_dev_config *config = &priv->config;
1889         const struct rte_flow_action_modify_field *conf =
1890                 (const struct rte_flow_action_modify_field *)(action->conf);
1891         struct rte_flow_item item;
1892         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1893                                                                 {0, 0, 0} };
1894         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1895                                                                 {0, 0, 0} };
1896         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1897         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1898         uint32_t type;
1899         uint32_t dst_width = mlx5_flow_item_field_width(config,
1900                                                         conf->dst.field);
1901
1902         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1903             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1904                 type = MLX5_MODIFICATION_TYPE_SET;
1905                 /* For SET fill the destination field (field) first. */
1906                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1907                         value, conf->width, dst_width, dev, attr, error);
1908                 /* Then copy immediate value from source as per mask. */
1909                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1910                         value, conf->width, dst_width, dev, attr, error);
1911                 item.spec = &value;
1912         } else {
1913                 type = MLX5_MODIFICATION_TYPE_COPY;
1914                 /* For COPY fill the destination field (dcopy) without mask. */
1915                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1916                         value, conf->width, dst_width, dev, attr, error);
1917                 /* Then construct the source field (field) with mask. */
1918                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1919                         value, conf->width, dst_width, dev, attr, error);
1920         }
1921         item.mask = &mask;
1922         return flow_dv_convert_modify_action(&item,
1923                         field, dcopy, resource, type, error);
1924 }
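
/*
 * Editor's illustrative sketch, public rte_flow API only: a SET of the
 * IPv4 TTL from an immediate value, which the routine above expands into
 * a single MLX5_MODI_OUT_IPV4_TTL modification. All values are arbitrary.
 */
__rte_unused static int
example_modify_field_set_ttl(uint16_t port, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_modify_field mf_conf = {
                .operation = RTE_FLOW_MODIFY_SET,
                .dst = { .field = RTE_FLOW_FIELD_IPV4_TTL },
                .src = { .field = RTE_FLOW_FIELD_VALUE, .value = 64 },
                .width = 8, /* full width per mlx5_flow_item_field_width() */
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD, .conf = &mf_conf },
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port, &attr, pattern, actions, err);
}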
1925
1926 /**
1927  * Validate MARK item.
1928  *
1929  * @param[in] dev
1930  *   Pointer to the rte_eth_dev structure.
1931  * @param[in] item
1932  *   Item specification.
1933  * @param[in] attr
1934  *   Attributes of flow that includes this item.
1935  * @param[out] error
1936  *   Pointer to error structure.
1937  *
1938  * @return
1939  *   0 on success, a negative errno value otherwise and rte_errno is set.
1940  */
1941 static int
1942 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1943                            const struct rte_flow_item *item,
1944                            const struct rte_flow_attr *attr __rte_unused,
1945                            struct rte_flow_error *error)
1946 {
1947         struct mlx5_priv *priv = dev->data->dev_private;
1948         struct mlx5_dev_config *config = &priv->config;
1949         const struct rte_flow_item_mark *spec = item->spec;
1950         const struct rte_flow_item_mark *mask = item->mask;
1951         const struct rte_flow_item_mark nic_mask = {
1952                 .id = priv->sh->dv_mark_mask,
1953         };
1954         int ret;
1955
1956         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1957                 return rte_flow_error_set(error, ENOTSUP,
1958                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1959                                           "extended metadata feature"
1960                                           " isn't enabled");
1961         if (!mlx5_flow_ext_mreg_supported(dev))
1962                 return rte_flow_error_set(error, ENOTSUP,
1963                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1964                                           "extended metadata register"
1965                                           " isn't supported");
1966         if (!nic_mask.id)
1967                 return rte_flow_error_set(error, ENOTSUP,
1968                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1969                                           "extended metadata register"
1970                                           " isn't available");
1971         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1972         if (ret < 0)
1973                 return ret;
1974         if (!spec)
1975                 return rte_flow_error_set(error, EINVAL,
1976                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1977                                           item->spec,
1978                                           "data cannot be empty");
1979         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1980                 return rte_flow_error_set(error, EINVAL,
1981                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1982                                           &spec->id,
1983                                           "mark id exceeds the limit");
1984         if (!mask)
1985                 mask = &nic_mask;
1986         if (!mask->id)
1987                 return rte_flow_error_set(error, EINVAL,
1988                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1989                                         "mask cannot be zero");
1990
1991         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1992                                         (const uint8_t *)&nic_mask,
1993                                         sizeof(struct rte_flow_item_mark),
1994                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1995         if (ret < 0)
1996                 return ret;
1997         return 0;
1998 }
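
/*
 * Editor's illustrative sketch: a pattern exercising the MARK item
 * validation above. Requires dv_xmeta_en != MLX5_XMETA_MODE_LEGACY on
 * the device; the mark id is arbitrary.
 */
__rte_unused static int
example_match_mark(uint16_t port, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
        const struct rte_flow_item_mark mark_spec = { .id = 42 };
        const struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_MARK,
                        .spec = &mark_spec,
                        .mask = &rte_flow_item_mark_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port, &attr, pattern, actions, err);
}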
1999
2000 /**
2001  * Validate META item.
2002  *
2003  * @param[in] dev
2004  *   Pointer to the rte_eth_dev structure.
2005  * @param[in] item
2006  *   Item specification.
2007  * @param[in] attr
2008  *   Attributes of flow that includes this item.
2009  * @param[out] error
2010  *   Pointer to error structure.
2011  *
2012  * @return
2013  *   0 on success, a negative errno value otherwise and rte_errno is set.
2014  */
2015 static int
2016 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
2017                            const struct rte_flow_item *item,
2018                            const struct rte_flow_attr *attr,
2019                            struct rte_flow_error *error)
2020 {
2021         struct mlx5_priv *priv = dev->data->dev_private;
2022         struct mlx5_dev_config *config = &priv->config;
2023         const struct rte_flow_item_meta *spec = item->spec;
2024         const struct rte_flow_item_meta *mask = item->mask;
2025         struct rte_flow_item_meta nic_mask = {
2026                 .data = UINT32_MAX
2027         };
2028         int reg;
2029         int ret;
2030
2031         if (!spec)
2032                 return rte_flow_error_set(error, EINVAL,
2033                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2034                                           item->spec,
2035                                           "data cannot be empty");
2036         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2037                 if (!mlx5_flow_ext_mreg_supported(dev))
2038                         return rte_flow_error_set(error, ENOTSUP,
2039                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2040                                           "extended metadata register"
2041                                           " isn't supported");
2042                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2043                 if (reg < 0)
2044                         return reg;
2045                 if (reg == REG_NON)
2046                         return rte_flow_error_set(error, ENOTSUP,
2047                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2048                                         "unavailable extended metadata register");
2049                 if (reg == REG_B)
2050                         return rte_flow_error_set(error, ENOTSUP,
2051                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2052                                           "match on reg_b "
2053                                           "isn't supported");
2054                 if (reg != REG_A)
2055                         nic_mask.data = priv->sh->dv_meta_mask;
2056         } else {
2057                 if (attr->transfer)
2058                         return rte_flow_error_set(error, ENOTSUP,
2059                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2060                                         "extended metadata feature "
2061                                         "should be enabled when "
2062                                         "meta item is requested "
2063                                         "with e-switch mode");
2064                 if (attr->ingress)
2065                         return rte_flow_error_set(error, ENOTSUP,
2066                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2067                                         "match on metadata for ingress "
2068                                         "is not supported in legacy "
2069                                         "metadata mode");
2070         }
2071         if (!mask)
2072                 mask = &rte_flow_item_meta_mask;
2073         if (!mask->data)
2074                 return rte_flow_error_set(error, EINVAL,
2075                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2076                                         "mask cannot be zero");
2077
2078         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2079                                         (const uint8_t *)&nic_mask,
2080                                         sizeof(struct rte_flow_item_meta),
2081                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2082         return ret;
2083 }
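
/*
 * Editor's illustrative sketch: matching on metadata set by a previous
 * SET_META action. Requires extended metadata (dv_xmeta_en) for ingress;
 * the data value is arbitrary.
 */
__rte_unused static int
example_match_meta(uint16_t port, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
        const struct rte_flow_item_meta meta_spec = { .data = 0xcafe };
        const struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_META,
                        .spec = &meta_spec,
                        .mask = &rte_flow_item_meta_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port, &attr, pattern, actions, err);
}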
2084
2085 /**
2086  * Validate TAG item.
2087  *
2088  * @param[in] dev
2089  *   Pointer to the rte_eth_dev structure.
2090  * @param[in] item
2091  *   Item specification.
2092  * @param[in] attr
2093  *   Attributes of flow that includes this item.
2094  * @param[out] error
2095  *   Pointer to error structure.
2096  *
2097  * @return
2098  *   0 on success, a negative errno value otherwise and rte_errno is set.
2099  */
2100 static int
2101 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2102                           const struct rte_flow_item *item,
2103                           const struct rte_flow_attr *attr __rte_unused,
2104                           struct rte_flow_error *error)
2105 {
2106         const struct rte_flow_item_tag *spec = item->spec;
2107         const struct rte_flow_item_tag *mask = item->mask;
2108         const struct rte_flow_item_tag nic_mask = {
2109                 .data = RTE_BE32(UINT32_MAX),
2110                 .index = 0xff,
2111         };
2112         int ret;
2113
2114         if (!mlx5_flow_ext_mreg_supported(dev))
2115                 return rte_flow_error_set(error, ENOTSUP,
2116                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2117                                           "extended metadata register"
2118                                           " isn't supported");
2119         if (!spec)
2120                 return rte_flow_error_set(error, EINVAL,
2121                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2122                                           item->spec,
2123                                           "data cannot be empty");
2124         if (!mask)
2125                 mask = &rte_flow_item_tag_mask;
2126         if (!mask->data)
2127                 return rte_flow_error_set(error, EINVAL,
2128                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2129                                         "mask cannot be zero");
2130
2131         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2132                                         (const uint8_t *)&nic_mask,
2133                                         sizeof(struct rte_flow_item_tag),
2134                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2135         if (ret < 0)
2136                 return ret;
2137         if (mask->index != 0xff)
2138                 return rte_flow_error_set(error, EINVAL,
2139                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2140                                           "partial mask for tag index"
2141                                           " is not supported");
2142         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2143         if (ret < 0)
2144                 return ret;
2145         MLX5_ASSERT(ret != REG_NON);
2146         return 0;
2147 }
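
/*
 * Editor's illustrative sketch: matching an application tag previously
 * written with a SET_TAG action. Index 3 and data 0x55 are arbitrary;
 * the full-index mask is mandatory per the validation above.
 */
__rte_unused static int
example_match_tag(uint16_t port, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
        const struct rte_flow_item_tag tag_spec = { .data = 0x55, .index = 3 };
        const struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_TAG,
                        .spec = &tag_spec,
                        .mask = &rte_flow_item_tag_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port, &attr, pattern, actions, err);
}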
2148
2149 /**
2150  * Validate port ID (vport) item.
2151  *
2152  * @param[in] dev
2153  *   Pointer to the rte_eth_dev structure.
2154  * @param[in] item
2155  *   Item specification.
2156  * @param[in] attr
2157  *   Attributes of flow that includes this item.
2158  * @param[in] item_flags
2159  *   Bit-fields that hold the items detected until now.
2160  * @param[out] error
2161  *   Pointer to error structure.
2162  *
2163  * @return
2164  *   0 on success, a negative errno value otherwise and rte_errno is set.
2165  */
2166 static int
2167 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2168                               const struct rte_flow_item *item,
2169                               const struct rte_flow_attr *attr,
2170                               uint64_t item_flags,
2171                               struct rte_flow_error *error)
2172 {
2173         const struct rte_flow_item_port_id *spec = item->spec;
2174         const struct rte_flow_item_port_id *mask = item->mask;
2175         const struct rte_flow_item_port_id switch_mask = {
2176                         .id = 0xffffffff,
2177         };
2178         struct mlx5_priv *esw_priv;
2179         struct mlx5_priv *dev_priv;
2180         int ret;
2181
2182         if (!attr->transfer)
2183                 return rte_flow_error_set(error, EINVAL,
2184                                           RTE_FLOW_ERROR_TYPE_ITEM,
2185                                           NULL,
2186                                           "match on port id is valid only"
2187                                           " when transfer flag is enabled");
2188         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2189                 return rte_flow_error_set(error, ENOTSUP,
2190                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2191                                           "multiple source ports are not"
2192                                           " supported");
2193         if (!mask)
2194                 mask = &switch_mask;
2195         if (mask->id != 0xffffffff)
2196                 return rte_flow_error_set(error, ENOTSUP,
2197                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2198                                            mask,
2199                                            "no support for partial mask on"
2200                                            " \"id\" field");
2201         ret = mlx5_flow_item_acceptable
2202                                 (item, (const uint8_t *)mask,
2203                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2204                                  sizeof(struct rte_flow_item_port_id),
2205                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2206         if (ret)
2207                 return ret;
2208         if (!spec)
2209                 return 0;
2210         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2211         if (!esw_priv)
2212                 return rte_flow_error_set(error, rte_errno,
2213                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2214                                           "failed to obtain E-Switch info for"
2215                                           " port");
2216         dev_priv = mlx5_dev_to_eswitch_info(dev);
2217         if (!dev_priv)
2218                 return rte_flow_error_set(error, rte_errno,
2219                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2220                                           NULL,
2221                                           "failed to obtain E-Switch info");
2222         if (esw_priv->domain_id != dev_priv->domain_id)
2223                 return rte_flow_error_set(error, EINVAL,
2224                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2225                                           "cannot match on a port from a"
2226                                           " different E-Switch");
2227         return 0;
2228 }
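
/*
 * Editor's illustrative sketch: a transfer rule matching traffic coming
 * from DPDK port 0 on the same E-Switch. Without the transfer attribute
 * the validation above rejects the item.
 */
__rte_unused static int
example_match_port_id(uint16_t port, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .transfer = 1, .group = 1 };
        const struct rte_flow_item_port_id pid_spec = { .id = 0 };
        const struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
                        .spec = &pid_spec,
                        .mask = &rte_flow_item_port_id_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port, &attr, pattern, actions, err);
}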
2229
2230 /**
2231  * Validate VLAN item.
2232  *
2233  * @param[in] item
2234  *   Item specification.
2235  * @param[in] item_flags
2236  *   Bit-fields that hold the items detected until now.
2237  * @param[in] dev
2238  *   Ethernet device flow is being created on.
2239  * @param[out] error
2240  *   Pointer to error structure.
2241  *
2242  * @return
2243  *   0 on success, a negative errno value otherwise and rte_errno is set.
2244  */
2245 static int
2246 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2247                            uint64_t item_flags,
2248                            struct rte_eth_dev *dev,
2249                            struct rte_flow_error *error)
2250 {
2251         const struct rte_flow_item_vlan *mask = item->mask;
2252         const struct rte_flow_item_vlan nic_mask = {
2253                 .tci = RTE_BE16(UINT16_MAX),
2254                 .inner_type = RTE_BE16(UINT16_MAX),
2255                 .has_more_vlan = 1,
2256         };
2257         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2258         int ret;
2259         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2260                                         MLX5_FLOW_LAYER_INNER_L4) :
2261                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2262                                         MLX5_FLOW_LAYER_OUTER_L4);
2263         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2264                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2265
2266         if (item_flags & vlanm)
2267                 return rte_flow_error_set(error, EINVAL,
2268                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2269                                           "multiple VLAN layers not supported");
2270         else if ((item_flags & l34m) != 0)
2271                 return rte_flow_error_set(error, EINVAL,
2272                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2273                                           "VLAN cannot follow L3/L4 layer");
2274         if (!mask)
2275                 mask = &rte_flow_item_vlan_mask;
2276         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2277                                         (const uint8_t *)&nic_mask,
2278                                         sizeof(struct rte_flow_item_vlan),
2279                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2280         if (ret)
2281                 return ret;
2282         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2283                 struct mlx5_priv *priv = dev->data->dev_private;
2284
2285                 if (priv->vmwa_context) {
2286                         /*
2287                          * Non-NULL context means we have a virtual machine
2288                          * and SR-IOV enabled. We have to create a VLAN
2289                          * interface so the hypervisor sets up the E-Switch
2290                          * vport context correctly. We avoid creating multiple
2291                          * VLAN interfaces, so we cannot support a VLAN tag mask.
2292                          */
2293                         return rte_flow_error_set(error, EINVAL,
2294                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2295                                                   item,
2296                                                   "VLAN tag mask is not"
2297                                                   " supported in virtual"
2298                                                   " environment");
2299                 }
2300         }
2301         return 0;
2302 }
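
/*
 * Editor's illustrative sketch: matching VID 100 with the VID-only
 * 0x0fff TCI mask, which also satisfies the vmwa_context restriction
 * above (a wider TCI mask is refused in SR-IOV/VM environments).
 */
__rte_unused static int
example_match_vlan(uint16_t port, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
        const struct rte_flow_item_vlan vlan_mask = {
                .tci = RTE_BE16(0x0fff),
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                {
                        .type = RTE_FLOW_ITEM_TYPE_VLAN,
                        .spec = &vlan_spec,
                        .mask = &vlan_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port, &attr, pattern, actions, err);
}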
2303
2304 /*
2305  * GTP flags are contained in 1 byte of the format:
2306  * -------------------------------------------
2307  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2308  * |-----------------------------------------|
2309  * | value | Version | PT | Res | E | S | PN |
2310  * -------------------------------------------
2311  *
2312  * Matching is supported only for GTP flags E, S, PN.
2313  */
2314 #define MLX5_GTP_FLAGS_MASK     0x07
2315
2316 /**
2317  * Validate GTP item.
2318  *
2319  * @param[in] dev
2320  *   Pointer to the rte_eth_dev structure.
2321  * @param[in] item
2322  *   Item specification.
2323  * @param[in] item_flags
2324  *   Bit-fields that hold the items detected until now.
2325  * @param[out] error
2326  *   Pointer to error structure.
2327  *
2328  * @return
2329  *   0 on success, a negative errno value otherwise and rte_errno is set.
2330  */
2331 static int
2332 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2333                           const struct rte_flow_item *item,
2334                           uint64_t item_flags,
2335                           struct rte_flow_error *error)
2336 {
2337         struct mlx5_priv *priv = dev->data->dev_private;
2338         const struct rte_flow_item_gtp *spec = item->spec;
2339         const struct rte_flow_item_gtp *mask = item->mask;
2340         const struct rte_flow_item_gtp nic_mask = {
2341                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2342                 .msg_type = 0xff,
2343                 .teid = RTE_BE32(0xffffffff),
2344         };
2345
2346         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2347                 return rte_flow_error_set(error, ENOTSUP,
2348                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2349                                           "GTP support is not enabled");
2350         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2351                 return rte_flow_error_set(error, ENOTSUP,
2352                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2353                                           "multiple tunnel layers not"
2354                                           " supported");
2355         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2356                 return rte_flow_error_set(error, EINVAL,
2357                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2358                                           "no outer UDP layer found");
2359         if (!mask)
2360                 mask = &rte_flow_item_gtp_mask;
2361         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2362                 return rte_flow_error_set(error, ENOTSUP,
2363                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                                           "Match is supported for GTP"
2365                                           " flags only");
2366         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2367                                          (const uint8_t *)&nic_mask,
2368                                          sizeof(struct rte_flow_item_gtp),
2369                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2370 }
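
/*
 * Editor's illustrative sketch: matching a GTP-U tunnel by TEID. The
 * outer UDP item is mandatory per the validation above; the TEID value
 * is arbitrary.
 */
__rte_unused static int
example_match_gtp_teid(uint16_t port, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item_gtp gtp_spec = {
                .teid = RTE_BE32(0x1234),
        };
        const struct rte_flow_item_gtp gtp_mask = {
                .teid = RTE_BE32(0xffffffff),
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                {
                        .type = RTE_FLOW_ITEM_TYPE_GTP,
                        .spec = &gtp_spec,
                        .mask = &gtp_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port, &attr, pattern, actions, err);
}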
2371
2372 /**
2373  * Validate GTP PSC item.
2374  *
2375  * @param[in] item
2376  *   Item specification.
2377  * @param[in] last_item
2378  *   Previous validated item in the pattern items.
2379  * @param[in] gtp_item
2380  *   Previous GTP item specification.
2381  * @param[in] attr
2382  *   Pointer to flow attributes.
2383  * @param[out] error
2384  *   Pointer to error structure.
2385  *
2386  * @return
2387  *   0 on success, a negative errno value otherwise and rte_errno is set.
2388  */
2389 static int
2390 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2391                               uint64_t last_item,
2392                               const struct rte_flow_item *gtp_item,
2393                               const struct rte_flow_attr *attr,
2394                               struct rte_flow_error *error)
2395 {
2396         const struct rte_flow_item_gtp *gtp_spec;
2397         const struct rte_flow_item_gtp *gtp_mask;
2398         const struct rte_flow_item_gtp_psc *spec;
2399         const struct rte_flow_item_gtp_psc *mask;
2400         const struct rte_flow_item_gtp_psc nic_mask = {
2401                 .pdu_type = 0xFF,
2402                 .qfi = 0xFF,
2403         };
2404
2405         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2406                 return rte_flow_error_set
2407                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2408                          "GTP PSC item must be preceded by GTP item");
2409         gtp_spec = gtp_item->spec;
2410         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2411         /* Reject a mask matching the E flag while the GTP spec clears it. */
2412         if (gtp_spec &&
2413                 (gtp_mask->v_pt_rsv_flags &
2414                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2415                 return rte_flow_error_set
2416                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2417                          "GTP E flag must be 1 to match GTP PSC");
2418         /* Check the flow is not created in group zero. */
2419         if (!attr->transfer && !attr->group)
2420                 return rte_flow_error_set
2421                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2422                          "GTP PSC is not supported for group 0");
2423         /* GTP PSC fields are optional, nothing more to check without a spec. */
2424         if (!item->spec)
2425                 return 0;
2426         spec = item->spec;
2427         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2428         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2429                 return rte_flow_error_set
2430                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2431                          "PDU type should be smaller than 16");
2432         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2433                                          (const uint8_t *)&nic_mask,
2434                                          sizeof(struct rte_flow_item_gtp_psc),
2435                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2436 }
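
/*
 * Editor's illustrative sketch: GTP PSC matching per the rules above,
 * i.e. a preceding GTP item with the E flag set, a non-zero group, and
 * a PDU type below 16. The QFI, TEID, and PDU type values are arbitrary.
 */
__rte_unused static int
example_match_gtp_psc(uint16_t port, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
        const struct rte_flow_item_gtp gtp_spec = {
                .v_pt_rsv_flags = MLX5_GTP_EXT_HEADER_FLAG,
                .teid = RTE_BE32(0x1234),
        };
        const struct rte_flow_item_gtp gtp_mask = {
                .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
                .teid = RTE_BE32(0xffffffff),
        };
        const struct rte_flow_item_gtp_psc psc_spec = {
                .pdu_type = 1, /* UL PDU session information */
                .qfi = 9,
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                {
                        .type = RTE_FLOW_ITEM_TYPE_GTP,
                        .spec = &gtp_spec,
                        .mask = &gtp_mask,
                },
                {
                        .type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
                        .spec = &psc_spec,
                        .mask = &rte_flow_item_gtp_psc_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port, &attr, pattern, actions, err);
}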
2437
2438 /**
2439  * Validate IPV4 item.
2440  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2441  * Use the existing validation function mlx5_flow_validate_item_ipv4(),
2442  * and add specific validation of the fragment_offset field.
2443  * @param[in] item
2444  *   Item specification.
2445  * @param[in] item_flags
2446  *   Bit-fields that hold the items detected until now.
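 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).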
2447  * @param[out] error
2448  *   Pointer to error structure.
2449  *
2450  * @return
2451  *   0 on success, a negative errno value otherwise and rte_errno is set.
2452  */
2453 static int
2454 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2455                            uint64_t item_flags,
2456                            uint64_t last_item,
2457                            uint16_t ether_type,
2458                            struct rte_flow_error *error)
2459 {
2460         int ret;
2461         const struct rte_flow_item_ipv4 *spec = item->spec;
2462         const struct rte_flow_item_ipv4 *last = item->last;
2463         const struct rte_flow_item_ipv4 *mask = item->mask;
2464         rte_be16_t fragment_offset_spec = 0;
2465         rte_be16_t fragment_offset_last = 0;
2466         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2467                 .hdr = {
2468                         .src_addr = RTE_BE32(0xffffffff),
2469                         .dst_addr = RTE_BE32(0xffffffff),
2470                         .type_of_service = 0xff,
2471                         .fragment_offset = RTE_BE16(0xffff),
2472                         .next_proto_id = 0xff,
2473                         .time_to_live = 0xff,
2474                 },
2475         };
2476
2477         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2478                                            ether_type, &nic_ipv4_mask,
2479                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2480         if (ret < 0)
2481                 return ret;
2482         if (spec && mask)
2483                 fragment_offset_spec = spec->hdr.fragment_offset &
2484                                        mask->hdr.fragment_offset;
2485         if (!fragment_offset_spec)
2486                 return 0;
2487         /*
2488          * spec and mask are valid, enforce using full mask to make sure the
2489          * complete value is used correctly.
2490          */
2491         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2492                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2493                 return rte_flow_error_set(error, EINVAL,
2494                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2495                                           item, "must use full mask for"
2496                                           " fragment_offset");
2497         /*
2498          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2499          * indicating this is the first fragment of a fragmented packet.
2500          * This is not yet supported in MLX5, return appropriate error message.
2501          */
2502         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2503                 return rte_flow_error_set(error, ENOTSUP,
2504                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2505                                           "match on first fragment not "
2506                                           "supported");
2507         if (fragment_offset_spec && !last)
2508                 return rte_flow_error_set(error, ENOTSUP,
2509                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2510                                           "specified value not supported");
2511         /* spec and last are valid, validate the specified range. */
2512         fragment_offset_last = last->hdr.fragment_offset &
2513                                mask->hdr.fragment_offset;
2514         /*
2515          * Match on fragment_offset spec 0x2001 and last 0x3fff
2516          * means MF is 1 and frag-offset is > 0.
2517          * This matches the second fragment and onward, excluding the last.
2518          * This is not yet supported in MLX5, return appropriate
2519          * error message.
2520          */
2521         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2522             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2523                 return rte_flow_error_set(error, ENOTSUP,
2524                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2525                                           last, "match on following "
2526                                           "fragments not supported");
2527         /*
2528          * Match on fragment_offset spec 0x0001 and last 0x1fff
2529          * means MF is 0 and frag-offset is > 0.
2530          * This matches the last fragment of a fragmented packet.
2531          * This is not yet supported in MLX5, return appropriate
2532          * error message.
2533          */
2534         if (fragment_offset_spec == RTE_BE16(1) &&
2535             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2536                 return rte_flow_error_set(error, ENOTSUP,
2537                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2538                                           last, "match on last "
2539                                           "fragment not supported");
2540         /*
2541          * Match on fragment_offset spec 0x0001 and last 0x3fff
2542          * means MF and/or frag-offset is not 0.
2543          * This is a fragmented packet.
2544          * Other range values are invalid and rejected.
2545          */
2546         if (!(fragment_offset_spec == RTE_BE16(1) &&
2547               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2548                 return rte_flow_error_set(error, ENOTSUP,
2549                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2550                                           "specified range not supported");
2551         return 0;
2552 }
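/*
 * Illustrative example (added to this document, not part of the original
 * file; all names below are hypothetical): the only fragment_offset range
 * accepted by flow_dv_validate_item_ipv4() above is spec 0x0001 with
 * last 0x3fff under a full mask, i.e. "match any fragmented packet".
 */
static const struct rte_flow_item_ipv4 ipv4_any_frag_spec = {
	.hdr = { .fragment_offset = RTE_BE16(1) },
};
static const struct rte_flow_item_ipv4 ipv4_any_frag_last = {
	.hdr = { .fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK) },
};
static const struct rte_flow_item_ipv4 ipv4_any_frag_mask = {
	.hdr = { .fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK) },
};
static const struct rte_flow_item ipv4_any_frag_item = {
	.type = RTE_FLOW_ITEM_TYPE_IPV4,
	.spec = &ipv4_any_frag_spec,
	.last = &ipv4_any_frag_last,
	.mask = &ipv4_any_frag_mask,
};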
2553
2554 /**
2555  * Validate IPV6 fragment extension item.
2556  *
2557  * @param[in] item
2558  *   Item specification.
2559  * @param[in] item_flags
2560  *   Bit-fields that holds the items detected until now.
2561  * @param[out] error
2562  *   Pointer to error structure.
2563  *
2564  * @return
2565  *   0 on success, a negative errno value otherwise and rte_errno is set.
2566  */
2567 static int
2568 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2569                                     uint64_t item_flags,
2570                                     struct rte_flow_error *error)
2571 {
2572         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2573         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2574         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2575         rte_be16_t frag_data_spec = 0;
2576         rte_be16_t frag_data_last = 0;
2577         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2578         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2579                                       MLX5_FLOW_LAYER_OUTER_L4;
2580         int ret = 0;
2581         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2582                 .hdr = {
2583                         .next_header = 0xff,
2584                         .frag_data = RTE_BE16(0xffff),
2585                 },
2586         };
2587
2588         if (item_flags & l4m)
2589                 return rte_flow_error_set(error, EINVAL,
2590                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2591                                           "ipv6 fragment extension item cannot "
2592                                           "follow L4 item.");
2593         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2594             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2595                 return rte_flow_error_set(error, EINVAL,
2596                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2597                                           "ipv6 fragment extension item must "
2598                                           "follow ipv6 item");
2599         if (spec && mask)
2600                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2601         if (!frag_data_spec)
2602                 return 0;
2603         /*
2604          * spec and mask are valid, enforce using full mask to make sure the
2605          * complete value is used correctly.
2606          */
2607         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2608                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2609                 return rte_flow_error_set(error, EINVAL,
2610                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2611                                           item, "must use full mask for"
2612                                           " frag_data");
2613         /*
2614          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2615          * This is the first fragment of a fragmented packet.
2616          */
2617         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2618                 return rte_flow_error_set(error, ENOTSUP,
2619                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2620                                           "match on first fragment not "
2621                                           "supported");
2622         if (frag_data_spec && !last)
2623                 return rte_flow_error_set(error, EINVAL,
2624                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2625                                           "specified value not supported");
2626         ret = mlx5_flow_item_acceptable
2627                                 (item, (const uint8_t *)mask,
2628                                  (const uint8_t *)&nic_mask,
2629                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2630                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2631         if (ret)
2632                 return ret;
2633         /* spec and last are valid, validate the specified range. */
2634         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2635         /*
2636          * Match on frag_data spec 0x0009 and last 0xfff9
2637          * means M is 1 and frag-offset is > 0.
2638          * This matches the second fragment and onward, excluding the last.
2639          * This is not yet supported in MLX5, return appropriate
2640          * error message.
2641          */
2642         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2643                                        RTE_IPV6_EHDR_MF_MASK) &&
2644             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2645                 return rte_flow_error_set(error, ENOTSUP,
2646                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2647                                           last, "match on following "
2648                                           "fragments not supported");
2649         /*
2650          * Match on frag_data spec 0x0008 and last 0xfff8
2651          * This matches the last fragment of a fragmented packet.
2652          * This packet is last fragment of fragmented packet.
2653          * This is not yet supported in MLX5, return appropriate
2654          * error message.
2655          */
2656         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2657             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2658                 return rte_flow_error_set(error, ENOTSUP,
2659                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2660                                           last, "match on last "
2661                                           "fragment not supported");
        /*
         * Match on frag_data spec 0x0001 and last 0xfff9
         * means MF and/or frag-offset is not 0.
         * This is a fragmented packet.
         * Other range values are invalid and rejected.
         */
        if (!(frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK) &&
              frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK)))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
                                          "specified range not supported");
        return 0;
2666 }
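/*
 * Illustrative example (added to this document; names are hypothetical):
 * the accepted IPv6 range mirrors the IPv4 case above, spec carries only
 * the M flag (0x0001) and last spans the whole used field (0xfff9).
 */
static const struct rte_flow_item_ipv6_frag_ext frag_ext_any_spec = {
	.hdr = { .frag_data = RTE_BE16(RTE_IPV6_EHDR_MF_MASK) },
};
static const struct rte_flow_item_ipv6_frag_ext frag_ext_any_last = {
	.hdr = { .frag_data = RTE_BE16(RTE_IPV6_FRAG_USED_MASK) },
};
static const struct rte_flow_item_ipv6_frag_ext frag_ext_any_mask = {
	.hdr = { .frag_data = RTE_BE16(RTE_IPV6_FRAG_USED_MASK) },
};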
2667
2668 /**
2669  * Validate ASO CT item.
2670  *
2671  * @param[in] dev
2672  *   Pointer to the rte_eth_dev structure.
2673  * @param[in] item
2674  *   Item specification.
2675  * @param[in] item_flags
2676  *   Pointer to bit-fields that holds the items detected until now.
2677  * @param[out] error
2678  *   Pointer to error structure.
2679  *
2680  * @return
2681  *   0 on success, a negative errno value otherwise and rte_errno is set.
2682  */
2683 static int
2684 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2685                              const struct rte_flow_item *item,
2686                              uint64_t *item_flags,
2687                              struct rte_flow_error *error)
2688 {
2689         const struct rte_flow_item_conntrack *spec = item->spec;
2690         const struct rte_flow_item_conntrack *mask = item->mask;
2691         RTE_SET_USED(dev);
2692         uint32_t flags;
2693
2694         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2695                 return rte_flow_error_set(error, EINVAL,
2696                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2697                                           "Only one CT is supported");
        if (!mask)
                mask = &rte_flow_item_conntrack_mask;
        /* The spec is optional; with no spec there are no flags to check. */
        flags = spec ? spec->flags & mask->flags : 0;
2701         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2702             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2703              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2704              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2705                 return rte_flow_error_set(error, EINVAL,
2706                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2707                                           "Conflict status bits");
2708         /* State change also needs to be considered. */
2709         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2710         return 0;
2711 }
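/*
 * Illustrative example (added to this document; the name is hypothetical):
 * a conntrack spec that flow_dv_validate_item_aso_ct() rejects, because
 * PKT_STATE_VALID conflicts with PKT_STATE_INVALID in the same match.
 */
static const struct rte_flow_item_conntrack conflicting_ct_spec = {
	.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
		 RTE_FLOW_CONNTRACK_PKT_STATE_INVALID,
};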
2712
2713 /**
2714  * Validate the pop VLAN action.
2715  *
2716  * @param[in] dev
2717  *   Pointer to the rte_eth_dev structure.
2718  * @param[in] action_flags
2719  *   Holds the actions detected until now.
2720  * @param[in] action
2721  *   Pointer to the pop vlan action.
2722  * @param[in] item_flags
2723  *   The items found in this flow rule.
2724  * @param[in] attr
2725  *   Pointer to flow attributes.
2726  * @param[out] error
2727  *   Pointer to error structure.
2728  *
2729  * @return
2730  *   0 on success, a negative errno value otherwise and rte_errno is set.
2731  */
2732 static int
2733 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2734                                  uint64_t action_flags,
2735                                  const struct rte_flow_action *action,
2736                                  uint64_t item_flags,
2737                                  const struct rte_flow_attr *attr,
2738                                  struct rte_flow_error *error)
2739 {
2740         const struct mlx5_priv *priv = dev->data->dev_private;
2741
2744         if (!priv->sh->pop_vlan_action)
2745                 return rte_flow_error_set(error, ENOTSUP,
2746                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2747                                           NULL,
2748                                           "pop vlan action is not supported");
2749         if (attr->egress)
2750                 return rte_flow_error_set(error, ENOTSUP,
2751                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2752                                           NULL,
2753                                           "pop vlan action not supported for "
2754                                           "egress");
2755         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2756                 return rte_flow_error_set(error, ENOTSUP,
2757                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2758                                           "no support for multiple VLAN "
2759                                           "actions");
2760         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2761         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2762             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2763                 return rte_flow_error_set(error, ENOTSUP,
2764                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2765                                           NULL,
2766                                           "cannot pop vlan after decap without "
2767                                           "match on inner vlan in the flow");
2768         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2769         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2770             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2771                 return rte_flow_error_set(error, ENOTSUP,
2772                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2773                                           NULL,
2774                                           "cannot pop vlan without a "
2775                                           "match on (outer) vlan in the flow");
2776         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2777                 return rte_flow_error_set(error, EINVAL,
2778                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2779                                           "wrong action order, port_id should "
2780                                           "be after pop VLAN action");
2781         if (!attr->transfer && priv->representor)
2782                 return rte_flow_error_set(error, ENOTSUP,
2783                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2784                                           "pop vlan action for VF representor "
2785                                           "not supported on NIC table");
2786         return 0;
2787 }
2788
2789 /**
2790  * Get VLAN default info from vlan match info.
2791  *
2792  * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to the VLAN header info to fill.
2799  */
2800 static void
2801 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2802                                   struct rte_vlan_hdr *vlan)
2803 {
2804         const struct rte_flow_item_vlan nic_mask = {
2805                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2806                                 MLX5DV_FLOW_VLAN_VID_MASK),
2807                 .inner_type = RTE_BE16(0xffff),
2808         };
2809
2810         if (items == NULL)
2811                 return;
2812         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2813                 int type = items->type;
2814
2815                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2816                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2817                         break;
2818         }
2819         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2820                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2821                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2822
2823                 /* If VLAN item in pattern doesn't contain data, return here. */
2824                 if (!vlan_v)
2825                         return;
2826                 if (!vlan_m)
2827                         vlan_m = &nic_mask;
2828                 /* Only full match values are accepted. */
2829                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2830                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2831                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2832                         vlan->vlan_tci |=
2833                                 rte_be_to_cpu_16(vlan_v->tci &
2834                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2835                 }
2836                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2837                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2838                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2839                         vlan->vlan_tci |=
2840                                 rte_be_to_cpu_16(vlan_v->tci &
2841                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2842                 }
2843                 if (vlan_m->inner_type == nic_mask.inner_type)
2844                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2845                                                            vlan_m->inner_type);
2846         }
2847 }
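/*
 * Worked example (added to this document; the function is hypothetical):
 * how a TCI value splits under the PCP/VID masks used above. For
 * tci = 0xa005: PCP = (0xa005 & 0xe000) >> 13 = 5, VID = 0xa005 & 0x0fff = 5.
 */
static inline void
vlan_tci_split_example(void)
{
	uint16_t tci = 0xa005;
	uint16_t pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
		       MLX5DV_FLOW_VLAN_PCP_SHIFT;
	uint16_t vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;

	(void)pcp; /* == 5 */
	(void)vid; /* == 5 */
}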
2848
2849 /**
2850  * Validate the push VLAN action.
2851  *
2852  * @param[in] dev
2853  *   Pointer to the rte_eth_dev structure.
2854  * @param[in] action_flags
2855  *   Holds the actions detected until now.
2856  * @param[in] vlan_m
2857  *   VLAN item mask from the flow pattern, or NULL if not present.
2858  * @param[in] action
2859  *   Pointer to the action structure.
2860  * @param[in] attr
2861  *   Pointer to flow attributes
2862  * @param[out] error
2863  *   Pointer to error structure.
2864  *
2865  * @return
2866  *   0 on success, a negative errno value otherwise and rte_errno is set.
2867  */
2868 static int
2869 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2870                                   uint64_t action_flags,
2871                                   const struct rte_flow_item_vlan *vlan_m,
2872                                   const struct rte_flow_action *action,
2873                                   const struct rte_flow_attr *attr,
2874                                   struct rte_flow_error *error)
2875 {
2876         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2877         const struct mlx5_priv *priv = dev->data->dev_private;
2878
2879         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2880             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2881                 return rte_flow_error_set(error, EINVAL,
2882                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2883                                           "invalid vlan ethertype");
2884         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2885                 return rte_flow_error_set(error, EINVAL,
2886                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2887                                           "wrong action order, port_id should "
2888                                           "be after push VLAN");
2889         if (!attr->transfer && priv->representor)
2890                 return rte_flow_error_set(error, ENOTSUP,
2891                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2892                                           "push vlan action for VF representor "
2893                                           "not supported on NIC table");
2894         if (vlan_m &&
2895             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2896             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2897                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2898             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2899             !(mlx5_flow_find_action
2900                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2901                 return rte_flow_error_set(error, EINVAL,
2902                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2903                                           "not full match mask on VLAN PCP and "
2904                                           "there is no of_set_vlan_pcp action, "
2905                                           "push VLAN action cannot figure out "
2906                                           "PCP value");
2907         if (vlan_m &&
2908             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2909             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2910                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2911             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2912             !(mlx5_flow_find_action
2913                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2914                 return rte_flow_error_set(error, EINVAL,
2915                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2916                                           "not full match mask on VLAN VID and "
2917                                           "there is no of_set_vlan_vid action, "
2918                                           "push VLAN action cannot figure out "
2919                                           "VID value");
2921         return 0;
2922 }
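/*
 * Illustrative example (added to this document; names are hypothetical):
 * a push VLAN action followed by an explicit set VID, which satisfies the
 * "cannot figure out VID value" check above without a full-mask VLAN match
 * in the pattern.
 */
static const struct rte_flow_action_of_push_vlan push_vlan_conf = {
	.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
static const struct rte_flow_action_of_set_vlan_vid set_vlan_vid_conf = {
	.vlan_vid = RTE_BE16(100),
};
static const struct rte_flow_action push_vlan_actions[] = {
	{
		.type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
		.conf = &push_vlan_conf,
	},
	{
		.type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
		.conf = &set_vlan_vid_conf,
	},
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};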
2923
2924 /**
2925  * Validate the set VLAN PCP.
2926  *
2927  * @param[in] action_flags
2928  *   Holds the actions detected until now.
2929  * @param[in] actions
2930  *   Pointer to the list of actions remaining in the flow rule.
2931  * @param[out] error
2932  *   Pointer to error structure.
2933  *
2934  * @return
2935  *   0 on success, a negative errno value otherwise and rte_errno is set.
2936  */
2937 static int
2938 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2939                                      const struct rte_flow_action actions[],
2940                                      struct rte_flow_error *error)
2941 {
2942         const struct rte_flow_action *action = actions;
2943         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2944
2945         if (conf->vlan_pcp > 7)
2946                 return rte_flow_error_set(error, EINVAL,
2947                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2948                                           "VLAN PCP value is too big");
2949         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2950                 return rte_flow_error_set(error, ENOTSUP,
2951                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2952                                           "set VLAN PCP action must follow "
2953                                           "the push VLAN action");
2954         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2955                 return rte_flow_error_set(error, ENOTSUP,
2956                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2957                                           "Multiple VLAN PCP modification are "
2958                                           "not supported");
2959         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2960                 return rte_flow_error_set(error, EINVAL,
2961                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2962                                           "wrong action order, port_id should "
2963                                           "be after set VLAN PCP");
2964         return 0;
2965 }
2966
2967 /**
2968  * Validate the set VLAN VID.
2969  *
2970  * @param[in] item_flags
2971  *   Holds the items detected in this rule.
2972  * @param[in] action_flags
2973  *   Holds the actions detected until now.
2974  * @param[in] actions
2975  *   Pointer to the list of actions remaining in the flow rule.
2976  * @param[out] error
2977  *   Pointer to error structure.
2978  *
2979  * @return
2980  *   0 on success, a negative errno value otherwise and rte_errno is set.
2981  */
2982 static int
2983 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2984                                      uint64_t action_flags,
2985                                      const struct rte_flow_action actions[],
2986                                      struct rte_flow_error *error)
2987 {
2988         const struct rte_flow_action *action = actions;
2989         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2990
2991         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2992                 return rte_flow_error_set(error, EINVAL,
2993                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2994                                           "VLAN VID value is too big");
2995         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2996             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2997                 return rte_flow_error_set(error, ENOTSUP,
2998                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2999                                           "set VLAN VID action must follow push"
3000                                           " VLAN action or match on VLAN item");
3001         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3002                 return rte_flow_error_set(error, ENOTSUP,
3003                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3004                                           "Multiple VLAN VID modifications are "
3005                                           "not supported");
3006         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3007                 return rte_flow_error_set(error, EINVAL,
3008                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3009                                           "wrong action order, port_id should "
3010                                           "be after set VLAN VID");
3011         return 0;
3012 }
3013
3014 /**
3015  * Validate the FLAG action.
3016  *
3017  * @param[in] dev
3018  *   Pointer to the rte_eth_dev structure.
3019  * @param[in] action_flags
3020  *   Holds the actions detected until now.
3021  * @param[in] attr
3022  *   Pointer to flow attributes
3023  * @param[out] error
3024  *   Pointer to error structure.
3025  *
3026  * @return
3027  *   0 on success, a negative errno value otherwise and rte_errno is set.
3028  */
3029 static int
3030 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3031                              uint64_t action_flags,
3032                              const struct rte_flow_attr *attr,
3033                              struct rte_flow_error *error)
3034 {
3035         struct mlx5_priv *priv = dev->data->dev_private;
3036         struct mlx5_dev_config *config = &priv->config;
3037         int ret;
3038
3039         /* Fall back if no extended metadata register support. */
3040         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3041                 return mlx5_flow_validate_action_flag(action_flags, attr,
3042                                                       error);
3043         /* Extensive metadata mode requires registers. */
3044         if (!mlx5_flow_ext_mreg_supported(dev))
3045                 return rte_flow_error_set(error, ENOTSUP,
3046                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3047                                           "no metadata registers "
3048                                           "to support flag action");
3049         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3050                 return rte_flow_error_set(error, ENOTSUP,
3051                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3052                                           "extended metadata register"
3053                                           " isn't available");
3054         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3055         if (ret < 0)
3056                 return ret;
3057         MLX5_ASSERT(ret > 0);
3058         if (action_flags & MLX5_FLOW_ACTION_MARK)
3059                 return rte_flow_error_set(error, EINVAL,
3060                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3061                                           "can't mark and flag in same flow");
3062         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3063                 return rte_flow_error_set(error, EINVAL,
3064                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3065                                           "can't have 2 flag"
3066                                           " actions in same flow");
3067         return 0;
3068 }
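/*
 * Note (added to this document): MLX5_XMETA_MODE_LEGACY corresponds to the
 * default dv_xmeta_en=0 devarg; an extended mode is selected per port, e.g.
 *   -a <pci_addr>,dv_xmeta_en=1
 * in which case FLAG/MARK are backed by metadata registers, hence the
 * register availability checks above.
 */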
3069
3070 /**
3071  * Validate MARK action.
3072  *
3073  * @param[in] dev
3074  *   Pointer to the rte_eth_dev structure.
3075  * @param[in] action
3076  *   Pointer to action.
3077  * @param[in] action_flags
3078  *   Holds the actions detected until now.
3079  * @param[in] attr
3080  *   Pointer to flow attributes
3081  * @param[out] error
3082  *   Pointer to error structure.
3083  *
3084  * @return
3085  *   0 on success, a negative errno value otherwise and rte_errno is set.
3086  */
3087 static int
3088 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3089                              const struct rte_flow_action *action,
3090                              uint64_t action_flags,
3091                              const struct rte_flow_attr *attr,
3092                              struct rte_flow_error *error)
3093 {
3094         struct mlx5_priv *priv = dev->data->dev_private;
3095         struct mlx5_dev_config *config = &priv->config;
3096         const struct rte_flow_action_mark *mark = action->conf;
3097         int ret;
3098
3099         if (is_tunnel_offload_active(dev))
3100                 return rte_flow_error_set(error, ENOTSUP,
3101                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3102                                           "no mark action "
3103                                           "if tunnel offload active");
3104         /* Fall back if no extended metadata register support. */
3105         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3106                 return mlx5_flow_validate_action_mark(action, action_flags,
3107                                                       attr, error);
3108         /* Extensive metadata mode requires registers. */
3109         if (!mlx5_flow_ext_mreg_supported(dev))
3110                 return rte_flow_error_set(error, ENOTSUP,
3111                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3112                                           "no metadata registers "
3113                                           "to support mark action");
3114         if (!priv->sh->dv_mark_mask)
3115                 return rte_flow_error_set(error, ENOTSUP,
3116                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3117                                           "extended metadata register"
3118                                           " isn't available");
3119         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3120         if (ret < 0)
3121                 return ret;
3122         MLX5_ASSERT(ret > 0);
3123         if (!mark)
3124                 return rte_flow_error_set(error, EINVAL,
3125                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3126                                           "configuration cannot be null");
3127         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3128                 return rte_flow_error_set(error, EINVAL,
3129                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3130                                           &mark->id,
3131                                           "mark id exceeds the limit");
3132         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3133                 return rte_flow_error_set(error, EINVAL,
3134                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3135                                           "can't flag and mark in same flow");
3136         if (action_flags & MLX5_FLOW_ACTION_MARK)
3137                 return rte_flow_error_set(error, EINVAL,
3138                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3139                                           "can't have 2 mark actions in same"
3140                                           " flow");
3141         return 0;
3142 }
3143
3144 /**
3145  * Validate SET_META action.
3146  *
3147  * @param[in] dev
3148  *   Pointer to the rte_eth_dev structure.
3149  * @param[in] action
3150  *   Pointer to the action structure.
3151  * @param[in] action_flags
3152  *   Holds the actions detected until now.
3153  * @param[in] attr
3154  *   Pointer to flow attributes
3155  * @param[out] error
3156  *   Pointer to error structure.
3157  *
3158  * @return
3159  *   0 on success, a negative errno value otherwise and rte_errno is set.
3160  */
3161 static int
3162 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3163                                  const struct rte_flow_action *action,
3164                                  uint64_t action_flags __rte_unused,
3165                                  const struct rte_flow_attr *attr,
3166                                  struct rte_flow_error *error)
3167 {
3168         const struct rte_flow_action_set_meta *conf;
3169         uint32_t nic_mask = UINT32_MAX;
3170         int reg;
3171
3172         if (!mlx5_flow_ext_mreg_supported(dev))
3173                 return rte_flow_error_set(error, ENOTSUP,
3174                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3175                                           "extended metadata register"
3176                                           " isn't supported");
3177         reg = flow_dv_get_metadata_reg(dev, attr, error);
3178         if (reg < 0)
3179                 return reg;
3180         if (reg == REG_NON)
3181                 return rte_flow_error_set(error, ENOTSUP,
3182                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3183                                          "unavailable extended metadata register");
3184         if (reg != REG_A && reg != REG_B) {
3185                 struct mlx5_priv *priv = dev->data->dev_private;
3186
3187                 nic_mask = priv->sh->dv_meta_mask;
3188         }
3189         if (!(action->conf))
3190                 return rte_flow_error_set(error, EINVAL,
3191                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3192                                           "configuration cannot be null");
3193         conf = (const struct rte_flow_action_set_meta *)action->conf;
3194         if (!conf->mask)
3195                 return rte_flow_error_set(error, EINVAL,
3196                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3197                                           "zero mask doesn't have any effect");
3198         if (conf->mask & ~nic_mask)
3199                 return rte_flow_error_set(error, EINVAL,
3200                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3201                                           "meta data must be within reg C0");
3202         return 0;
3203 }
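/*
 * Illustrative example (added to this document; the name is hypothetical):
 * a SET_META configuration with a non-zero mask confined to the bits backed
 * by register C0, as enforced above.
 */
static const struct rte_flow_action_set_meta set_meta_conf = {
	.data = 0x1234,
	.mask = 0xffff,
};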
3204
3205 /**
3206  * Validate SET_TAG action.
3207  *
3208  * @param[in] dev
3209  *   Pointer to the rte_eth_dev structure.
3210  * @param[in] action
3211  *   Pointer to the action structure.
3212  * @param[in] action_flags
3213  *   Holds the actions detected until now.
3214  * @param[in] attr
3215  *   Pointer to flow attributes
3216  * @param[out] error
3217  *   Pointer to error structure.
3218  *
3219  * @return
3220  *   0 on success, a negative errno value otherwise and rte_errno is set.
3221  */
3222 static int
3223 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3224                                 const struct rte_flow_action *action,
3225                                 uint64_t action_flags,
3226                                 const struct rte_flow_attr *attr,
3227                                 struct rte_flow_error *error)
3228 {
3229         const struct rte_flow_action_set_tag *conf;
3230         const uint64_t terminal_action_flags =
3231                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3232                 MLX5_FLOW_ACTION_RSS;
3233         int ret;
3234
3235         if (!mlx5_flow_ext_mreg_supported(dev))
3236                 return rte_flow_error_set(error, ENOTSUP,
3237                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3238                                           "extensive metadata register"
3239                                           " isn't supported");
3240         if (!(action->conf))
3241                 return rte_flow_error_set(error, EINVAL,
3242                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3243                                           "configuration cannot be null");
3244         conf = (const struct rte_flow_action_set_tag *)action->conf;
3245         if (!conf->mask)
3246                 return rte_flow_error_set(error, EINVAL,
3247                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3248                                           "zero mask doesn't have any effect");
3249         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3250         if (ret < 0)
3251                 return ret;
3252         if (!attr->transfer && attr->ingress &&
3253             (action_flags & terminal_action_flags))
3254                 return rte_flow_error_set(error, EINVAL,
3255                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3256                                           "set_tag has no effect"
3257                                           " with terminal actions");
3258         return 0;
3259 }
3260
3261 /**
3262  * Check if action counter is shared by either old or new mechanism.
3263  *
3264  * @param[in] action
3265  *   Pointer to the action structure.
3266  *
3267  * @return
3268  *   True when counter is shared, false otherwise.
3269  */
3270 static inline bool
3271 is_shared_action_count(const struct rte_flow_action *action)
3272 {
3273         const struct rte_flow_action_count *count =
3274                         (const struct rte_flow_action_count *)action->conf;
3275
3276         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3277                 return true;
3278         return !!(count && count->shared);
3279 }
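/*
 * Illustrative example (added to this document; the name is hypothetical):
 * a counter that is_shared_action_count() reports as shared via the legacy
 * 'shared' field mechanism.
 */
static const struct rte_flow_action_count shared_count_conf = {
	.shared = 1,
	.id = 42,
};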
3280
3281 /**
3282  * Validate count action.
3283  *
3284  * @param[in] dev
3285  *   Pointer to rte_eth_dev structure.
3286  * @param[in] shared
3287  *   Indicator if action is shared.
3288  * @param[in] action_flags
3289  *   Holds the actions detected until now.
3290  * @param[out] error
3291  *   Pointer to error structure.
3292  *
3293  * @return
3294  *   0 on success, a negative errno value otherwise and rte_errno is set.
3295  */
3296 static int
3297 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3298                               uint64_t action_flags,
3299                               struct rte_flow_error *error)
3300 {
3301         struct mlx5_priv *priv = dev->data->dev_private;
3302
3303         if (!priv->config.devx)
3304                 goto notsup_err;
3305         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3306                 return rte_flow_error_set(error, EINVAL,
3307                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3308                                           "duplicate count actions set");
3309         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3310             !priv->sh->flow_hit_aso_en)
3311                 return rte_flow_error_set(error, EINVAL,
3312                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3313                                           "old age and shared count combination is not supported");
3314 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3315         return 0;
3316 #endif
3317 notsup_err:
3318         return rte_flow_error_set
3319                       (error, ENOTSUP,
3320                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3321                        NULL,
3322                        "count action not supported");
3323 }
3324
3325 /**
3326  * Validate the L2 encap action.
3327  *
3328  * @param[in] dev
3329  *   Pointer to the rte_eth_dev structure.
3330  * @param[in] action_flags
3331  *   Holds the actions detected until now.
3332  * @param[in] action
3333  *   Pointer to the action structure.
3334  * @param[in] attr
3335  *   Pointer to flow attributes.
3336  * @param[out] error
3337  *   Pointer to error structure.
3338  *
3339  * @return
3340  *   0 on success, a negative errno value otherwise and rte_errno is set.
3341  */
3342 static int
3343 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3344                                  uint64_t action_flags,
3345                                  const struct rte_flow_action *action,
3346                                  const struct rte_flow_attr *attr,
3347                                  struct rte_flow_error *error)
3348 {
3349         const struct mlx5_priv *priv = dev->data->dev_private;
3350
3351         if (!(action->conf))
3352                 return rte_flow_error_set(error, EINVAL,
3353                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3354                                           "configuration cannot be null");
3355         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3356                 return rte_flow_error_set(error, EINVAL,
3357                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3358                                           "can only have a single encap action "
3359                                           "in a flow");
3360         if (!attr->transfer && priv->representor)
3361                 return rte_flow_error_set(error, ENOTSUP,
3362                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3363                                           "encap action for VF representor "
3364                                           "not supported on NIC table");
3365         return 0;
3366 }
3367
3368 /**
3369  * Validate a decap action.
3370  *
3371  * @param[in] dev
3372  *   Pointer to the rte_eth_dev structure.
3373  * @param[in] action_flags
3374  *   Holds the actions detected until now.
3375  * @param[in] action
3376  *   Pointer to the action structure.
3377  * @param[in] item_flags
3378  *   Holds the items detected.
3379  * @param[in] attr
3380  *   Pointer to flow attributes
3381  * @param[out] error
3382  *   Pointer to error structure.
3383  *
3384  * @return
3385  *   0 on success, a negative errno value otherwise and rte_errno is set.
3386  */
3387 static int
3388 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3389                               uint64_t action_flags,
3390                               const struct rte_flow_action *action,
3391                               const uint64_t item_flags,
3392                               const struct rte_flow_attr *attr,
3393                               struct rte_flow_error *error)
3394 {
3395         const struct mlx5_priv *priv = dev->data->dev_private;
3396
3397         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3398             !priv->config.decap_en)
3399                 return rte_flow_error_set(error, ENOTSUP,
3400                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3401                                           "decap is not enabled");
3402         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3403                 return rte_flow_error_set(error, ENOTSUP,
3404                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3405                                           action_flags &
3406                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3407                                           "have a single decap action" : "decap "
3408                                           "after encap is not supported");
3409         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3410                 return rte_flow_error_set(error, EINVAL,
3411                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3412                                           "can't have decap action after"
3413                                           " modify action");
3414         if (attr->egress)
3415                 return rte_flow_error_set(error, ENOTSUP,
3416                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3417                                           NULL,
3418                                           "decap action not supported for "
3419                                           "egress");
3420         if (!attr->transfer && priv->representor)
3421                 return rte_flow_error_set(error, ENOTSUP,
3422                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3423                                           "decap action for VF representor "
3424                                           "not supported on NIC table");
3425         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3426             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3427                 return rte_flow_error_set(error, ENOTSUP,
3428                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3429                                 "VXLAN item should be present for VXLAN decap");
3430         return 0;
3431 }
3432
3433 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3434
3435 /**
3436  * Validate the raw encap and decap actions.
3437  *
3438  * @param[in] dev
3439  *   Pointer to the rte_eth_dev structure.
3440  * @param[in] decap
3441  *   Pointer to the decap action.
3442  * @param[in] encap
3443  *   Pointer to the encap action.
3444  * @param[in] attr
3445  *   Pointer to flow attributes
3446  * @param[in, out] action_flags
3447  *   Holds the actions detected until now.
3448  * @param[out] actions_n
3449  *   pointer to the number of actions counter.
3450  * @param[in] action
3451  *   Pointer to the action structure.
3452  * @param[in] item_flags
3453  *   Holds the items detected.
3454  * @param[out] error
3455  *   Pointer to error structure.
3456  *
3457  * @return
3458  *   0 on success, a negative errno value otherwise and rte_errno is set.
3459  */
3460 static int
3461 flow_dv_validate_action_raw_encap_decap
3462         (struct rte_eth_dev *dev,
3463          const struct rte_flow_action_raw_decap *decap,
3464          const struct rte_flow_action_raw_encap *encap,
3465          const struct rte_flow_attr *attr, uint64_t *action_flags,
3466          int *actions_n, const struct rte_flow_action *action,
3467          uint64_t item_flags, struct rte_flow_error *error)
3468 {
3469         const struct mlx5_priv *priv = dev->data->dev_private;
3470         int ret;
3471
3472         if (encap && (!encap->size || !encap->data))
3473                 return rte_flow_error_set(error, EINVAL,
3474                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3475                                           "raw encap data cannot be empty");
3476         if (decap && encap) {
3477                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3478                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3479                         /* L3 encap. */
3480                         decap = NULL;
3481                 else if (encap->size <=
3482                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3483                            decap->size >
3484                            MLX5_ENCAPSULATION_DECISION_SIZE)
3485                         /* L3 decap. */
3486                         encap = NULL;
3487                 else if (encap->size >
3488                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3489                            decap->size >
3490                            MLX5_ENCAPSULATION_DECISION_SIZE)
3491                         /* 2 L2 actions: encap and decap. */
3492                         ;
3493                 else
3494                         return rte_flow_error_set(error,
3495                                 ENOTSUP,
3496                                 RTE_FLOW_ERROR_TYPE_ACTION,
3497                                 NULL, "unsupported too small "
3498                                 "raw decap and too small raw "
3499                                 "encap combination");
3500         }
3501         if (decap) {
3502                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3503                                                     item_flags, attr, error);
3504                 if (ret < 0)
3505                         return ret;
3506                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3507                 ++(*actions_n);
3508         }
3509         if (encap) {
3510                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3511                         return rte_flow_error_set(error, ENOTSUP,
3512                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3513                                                   NULL,
3514                                                   "small raw encap size");
3515                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3516                         return rte_flow_error_set(error, EINVAL,
3517                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3518                                                   NULL,
3519                                                   "more than one encap action");
3520                 if (!attr->transfer && priv->representor)
3521                         return rte_flow_error_set
3522                                         (error, ENOTSUP,
3523                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3524                                          "encap action for VF representor "
3525                                          "not supported on NIC table");
3526                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3527                 ++(*actions_n);
3528         }
3529         return 0;
3530 }
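/*
 * Illustrative example (added to this document; names and the 50-byte size
 * are assumptions): a raw decap larger than the decision size paired with a
 * raw encap of a bare L2 header is classified by the function above as a
 * single L3 decap, so only the decap part is validated and counted.
 */
static uint8_t l2_hdr_data[RTE_ETHER_HDR_LEN];
static const struct rte_flow_action_raw_decap outer_hdr_decap = {
	.data = NULL,
	.size = 50, /* e.g. ETH + IPv4 + UDP + VXLAN outer headers */
};
static const struct rte_flow_action_raw_encap l2_hdr_encap = {
	.data = l2_hdr_data,
	.size = sizeof(l2_hdr_data),
};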
3531
3532 /**
3533  * Validate the ASO CT action.
3534  *
3535  * @param[in] dev
3536  *   Pointer to the rte_eth_dev structure.
3537  * @param[in] action_flags
3538  *   Holds the actions detected until now.
3539  * @param[in] item_flags
3540  *   The items found in this flow rule.
3541  * @param[in] attr
3542  *   Pointer to flow attributes.
3543  * @param[out] error
3544  *   Pointer to error structure.
3545  *
3546  * @return
3547  *   0 on success, a negative errno value otherwise and rte_errno is set.
3548  */
3549 static int
3550 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3551                                uint64_t action_flags,
3552                                uint64_t item_flags,
3553                                const struct rte_flow_attr *attr,
3554                                struct rte_flow_error *error)
3555 {
3556         RTE_SET_USED(dev);
3557
3558         if (attr->group == 0 && !attr->transfer)
3559                 return rte_flow_error_set(error, ENOTSUP,
3560                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3561                                           NULL,
3562                                           "Only support non-root table");
3563         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3564                 return rte_flow_error_set(error, ENOTSUP,
3565                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3566                                           "CT cannot follow a fate action");
3567         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3568             (action_flags & MLX5_FLOW_ACTION_AGE))
3569                 return rte_flow_error_set(error, EINVAL,
3570                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3571                                           "Only one ASO action is supported");
3572         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3573                 return rte_flow_error_set(error, EINVAL,
3574                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3575                                           "Encap cannot exist before CT");
3576         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3577                 return rte_flow_error_set(error, EINVAL,
3578                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3579                                           "Not an outer TCP packet");
3580         return 0;
3581 }
3582
3583 /**
3584  * Match encap_decap resource.
3585  *
3586  * @param list
3587  *   Pointer to the hash list.
3588  * @param entry
3589  *   Pointer to the existing resource entry object.
3590  * @param key
3591  *   Key of the new entry.
3592  * @param cb_ctx
3593  *   Pointer to the context with the new encap_decap resource.
3594  *
3595  * @return
3596  *   0 on matching, non-zero otherwise.
3597  */
3598 int
3599 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3600                              struct mlx5_hlist_entry *entry,
3601                              uint64_t key __rte_unused, void *cb_ctx)
3602 {
3603         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3604         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3605         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3606
3607         cache_resource = container_of(entry,
3608                                       struct mlx5_flow_dv_encap_decap_resource,
3609                                       entry);
3610         if (resource->reformat_type == cache_resource->reformat_type &&
3611             resource->ft_type == cache_resource->ft_type &&
3612             resource->flags == cache_resource->flags &&
3613             resource->size == cache_resource->size &&
3614             !memcmp((const void *)resource->buf,
3615                     (const void *)cache_resource->buf,
3616                     resource->size))
3617                 return 0;
3618         return -1;
3619 }
3620
3621 /**
3622  * Allocate encap_decap resource.
3623  *
3624  * @param list
3625  *   Pointer to the hash list.
3626  * @param key
3627  *   Key of the new entry.
3628  * @param cb_ctx
3629  *   Pointer to the context with the new encap_decap resource.
3630  *
3631  * @return
3632  *   Pointer to the new entry on success, NULL otherwise and rte_errno is set.
3633  */
3634 struct mlx5_hlist_entry *
3635 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3636                               uint64_t key __rte_unused,
3637                               void *cb_ctx)
3638 {
3639         struct mlx5_dev_ctx_shared *sh = list->ctx;
3640         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3641         struct mlx5dv_dr_domain *domain;
3642         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3643         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3644         uint32_t idx;
3645         int ret;
3646
3647         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3648                 domain = sh->fdb_domain;
3649         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3650                 domain = sh->rx_domain;
3651         else
3652                 domain = sh->tx_domain;
3653         /* Register new encap/decap resource. */
3654         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3655                                        &idx);
3656         if (!cache_resource) {
3657                 rte_flow_error_set(ctx->error, ENOMEM,
3658                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3659                                    "cannot allocate resource memory");
3660                 return NULL;
3661         }
3662         *cache_resource = *resource;
3663         cache_resource->idx = idx;
3664         ret = mlx5_flow_os_create_flow_action_packet_reformat
3665                                         (sh->ctx, domain, cache_resource,
3666                                          &cache_resource->action);
3667         if (ret) {
3668                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3669                 rte_flow_error_set(ctx->error, ENOMEM,
3670                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3671                                    NULL, "cannot create action");
3672                 return NULL;
3673         }
3674
3675         return &cache_resource->entry;
3676 }
3677
3678 /**
3679  * Find existing encap/decap resource or create and register a new one.
3680  *
3681  * @param[in, out] dev
3682  *   Pointer to rte_eth_dev structure.
3683  * @param[in, out] resource
3684  *   Pointer to encap/decap resource.
3685  * @param[in, out] dev_flow
3686  *   Pointer to the dev_flow.
3687  * @param[out] error
3688  *   Pointer to error structure.
3689  *
3690  * @return
3691  *   0 on success, a negative errno value otherwise and rte_errno is set.
3692  */
3693 static int
3694 flow_dv_encap_decap_resource_register
3695                         (struct rte_eth_dev *dev,
3696                          struct mlx5_flow_dv_encap_decap_resource *resource,
3697                          struct mlx5_flow *dev_flow,
3698                          struct rte_flow_error *error)
3699 {
3700         struct mlx5_priv *priv = dev->data->dev_private;
3701         struct mlx5_dev_ctx_shared *sh = priv->sh;
3702         struct mlx5_hlist_entry *entry;
3703         union {
3704                 struct {
3705                         uint32_t ft_type:8;
3706                         uint32_t refmt_type:8;
3707                         /*
3708                          * Header reformat actions can be shared
3709                          * among non-root tables. One bit records
3710                          * whether the table is non-root or not.
3711                          */
3712                         uint32_t is_root:1;
3713                         uint32_t reserve:15;
3714                 };
3715                 uint32_t v32;
3716         } encap_decap_key = {
3717                 {
3718                         .ft_type = resource->ft_type,
3719                         .refmt_type = resource->reformat_type,
3720                         .is_root = !!dev_flow->dv.group,
3721                         .reserve = 0,
3722                 }
3723         };
3724         struct mlx5_flow_cb_ctx ctx = {
3725                 .error = error,
3726                 .data = resource,
3727         };
3728         uint64_t key64;
3729
3730         resource->flags = dev_flow->dv.group ? 0 : 1;
3731         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3732                                 sizeof(encap_decap_key.v32), 0);
3733         if (resource->reformat_type !=
3734             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3735             resource->size)
3736                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3737         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3738         if (!entry)
3739                 return -rte_errno;
3740         resource = container_of(entry, typeof(*resource), entry);
3741         dev_flow->dv.encap_decap = resource;
3742         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3743         return 0;
3744 }
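
/*
 * Editorial illustration of the key derivation above: the packed
 * ft_type/refmt_type/is_root word seeds the raw checksum, and for encap
 * types the header bytes are chained into it so that different headers
 * hash to different buckets. Buffer and length are hypothetical.
 *
 * @code
 * uint32_t v32 = 0;	// packed {ft_type:8, refmt_type:8, is_root:1, ...}
 * uint64_t k;
 *
 * k = __rte_raw_cksum(&v32, sizeof(v32), 0);
 * k = __rte_raw_cksum(encap_buf, encap_len, k);	// chain header bytes
 * @endcode
 */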
3745
3746 /**
3747  * Find existing table jump resource or create and register a new one.
3748  *
3749  * @param[in, out] dev
3750  *   Pointer to rte_eth_dev structure.
3751  * @param[in, out] tbl
3752  *   Pointer to flow table resource.
3753  * @param[in, out] dev_flow
3754  *   Pointer to the dev_flow.
3755  * @param[out] error
3756  *   Pointer to error structure.
3757  *
3758  * @return
3759  *   0 on success, a negative errno value otherwise and rte_errno is set.
3760  */
3761 static int
3762 flow_dv_jump_tbl_resource_register
3763                         (struct rte_eth_dev *dev __rte_unused,
3764                          struct mlx5_flow_tbl_resource *tbl,
3765                          struct mlx5_flow *dev_flow,
3766                          struct rte_flow_error *error __rte_unused)
3767 {
3768         struct mlx5_flow_tbl_data_entry *tbl_data =
3769                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3770
3771         MLX5_ASSERT(tbl);
3772         MLX5_ASSERT(tbl_data->jump.action);
3773         dev_flow->handle->rix_jump = tbl_data->idx;
3774         dev_flow->dv.jump = &tbl_data->jump;
3775         return 0;
3776 }
3777
3778 int
3779 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3780                          struct mlx5_cache_entry *entry, void *cb_ctx)
3781 {
3782         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3783         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3784         struct mlx5_flow_dv_port_id_action_resource *res =
3785                         container_of(entry, typeof(*res), entry);
3786
3787         return ref->port_id != res->port_id;
3788 }
3789
3790 struct mlx5_cache_entry *
3791 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3792                           struct mlx5_cache_entry *entry __rte_unused,
3793                           void *cb_ctx)
3794 {
3795         struct mlx5_dev_ctx_shared *sh = list->ctx;
3796         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3797         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3798         struct mlx5_flow_dv_port_id_action_resource *cache;
3799         uint32_t idx;
3800         int ret;
3801
3802         /* Register new port id action resource. */
3803         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3804         if (!cache) {
3805                 rte_flow_error_set(ctx->error, ENOMEM,
3806                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3807                                    "cannot allocate port_id action cache memory");
3808                 return NULL;
3809         }
3810         *cache = *ref;
3811         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3812                                                         ref->port_id,
3813                                                         &cache->action);
3814         if (ret) {
3815                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3816                 rte_flow_error_set(ctx->error, ENOMEM,
3817                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3818                                    "cannot create action");
3819                 return NULL;
3820         }
3821         cache->idx = idx;
3822         return &cache->entry;
3823 }
3824
3825 /**
3826  * Find existing table port ID resource or create and register a new one.
3827  *
3828  * @param[in, out] dev
3829  *   Pointer to rte_eth_dev structure.
3830  * @param[in, out] resource
3831  *   Pointer to port ID action resource.
3832  * @param[in, out] dev_flow
3833  *   Pointer to the dev_flow.
3834  * @param[out] error
3835  *   Pointer to error structure.
3836  *
3837  * @return
3838  *   0 on success, a negative errno value otherwise and rte_errno is set.
3839  */
3840 static int
3841 flow_dv_port_id_action_resource_register
3842                         (struct rte_eth_dev *dev,
3843                          struct mlx5_flow_dv_port_id_action_resource *resource,
3844                          struct mlx5_flow *dev_flow,
3845                          struct rte_flow_error *error)
3846 {
3847         struct mlx5_priv *priv = dev->data->dev_private;
3848         struct mlx5_cache_entry *entry;
3849         struct mlx5_flow_dv_port_id_action_resource *cache;
3850         struct mlx5_flow_cb_ctx ctx = {
3851                 .error = error,
3852                 .data = resource,
3853         };
3854
3855         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3856         if (!entry)
3857                 return -rte_errno;
3858         cache = container_of(entry, typeof(*cache), entry);
3859         dev_flow->dv.port_id_action = cache;
3860         dev_flow->handle->rix_port_id_action = cache->idx;
3861         return 0;
3862 }
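
/*
 * Editorial illustration: the application-side action that reaches this
 * registration path -- an E-Switch (transfer) rule redirecting traffic
 * to another port. The ethdev port number is a hypothetical example.
 *
 * @code
 * struct rte_flow_action_port_id conf = { .id = 1 };
 * struct rte_flow_action act = {
 *	.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *	.conf = &conf,
 * };
 * @endcode
 */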
3863
3864 int
3865 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3866                          struct mlx5_cache_entry *entry, void *cb_ctx)
3867 {
3868         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3869         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3870         struct mlx5_flow_dv_push_vlan_action_resource *res =
3871                         container_of(entry, typeof(*res), entry);
3872
3873         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3874 }
3875
3876 struct mlx5_cache_entry *
3877 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3878                           struct mlx5_cache_entry *entry __rte_unused,
3879                           void *cb_ctx)
3880 {
3881         struct mlx5_dev_ctx_shared *sh = list->ctx;
3882         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3883         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3884         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3885         struct mlx5dv_dr_domain *domain;
3886         uint32_t idx;
3887         int ret;
3888
3889         /* Register new push VLAN action resource. */
3890         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3891         if (!cache) {
3892                 rte_flow_error_set(ctx->error, ENOMEM,
3893                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3894                                    "cannot allocate push_vlan action cache memory");
3895                 return NULL;
3896         }
3897         *cache = *ref;
3898         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3899                 domain = sh->fdb_domain;
3900         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3901                 domain = sh->rx_domain;
3902         else
3903                 domain = sh->tx_domain;
3904         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3905                                                         &cache->action);
3906         if (ret) {
3907                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3908                 rte_flow_error_set(ctx->error, ENOMEM,
3909                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3910                                    "cannot create push vlan action");
3911                 return NULL;
3912         }
3913         cache->idx = idx;
3914         return &cache->entry;
3915 }
3916
3917 /**
3918  * Find existing push vlan resource or create and register a new one.
3919  *
3920  * @param[in, out] dev
3921  *   Pointer to rte_eth_dev structure.
3922  * @param[in, out] resource
3923  *   Pointer to push VLAN action resource.
3924  * @param[in, out] dev_flow
3925  *   Pointer to the dev_flow.
3926  * @param[out] error
3927  *   Pointer to error structure.
3928  *
3929  * @return
3930  *   0 on success, a negative errno value otherwise and rte_errno is set.
3931  */
3932 static int
3933 flow_dv_push_vlan_action_resource_register
3934                        (struct rte_eth_dev *dev,
3935                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3936                         struct mlx5_flow *dev_flow,
3937                         struct rte_flow_error *error)
3938 {
3939         struct mlx5_priv *priv = dev->data->dev_private;
3940         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3941         struct mlx5_cache_entry *entry;
3942         struct mlx5_flow_cb_ctx ctx = {
3943                 .error = error,
3944                 .data = resource,
3945         };
3946
3947         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3948         if (!entry)
3949                 return -rte_errno;
3950         cache = container_of(entry, typeof(*cache), entry);
3951
3952         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3953         dev_flow->dv.push_vlan_res = cache;
3954         return 0;
3955 }
3956
3957 /**
3958  * Get the header length of a specific rte_flow_item_type.
3959  *
3960  * @param[in] item_type
3961  *   Tested rte_flow_item_type.
3962  *
3963  * @return
3964  *   Header size of the item type, 0 if void or irrelevant.
3965  */
3966 static size_t
3967 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3968 {
3969         size_t retval;
3970
3971         switch (item_type) {
3972         case RTE_FLOW_ITEM_TYPE_ETH:
3973                 retval = sizeof(struct rte_ether_hdr);
3974                 break;
3975         case RTE_FLOW_ITEM_TYPE_VLAN:
3976                 retval = sizeof(struct rte_vlan_hdr);
3977                 break;
3978         case RTE_FLOW_ITEM_TYPE_IPV4:
3979                 retval = sizeof(struct rte_ipv4_hdr);
3980                 break;
3981         case RTE_FLOW_ITEM_TYPE_IPV6:
3982                 retval = sizeof(struct rte_ipv6_hdr);
3983                 break;
3984         case RTE_FLOW_ITEM_TYPE_UDP:
3985                 retval = sizeof(struct rte_udp_hdr);
3986                 break;
3987         case RTE_FLOW_ITEM_TYPE_TCP:
3988                 retval = sizeof(struct rte_tcp_hdr);
3989                 break;
3990         case RTE_FLOW_ITEM_TYPE_VXLAN:
3991         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3992                 retval = sizeof(struct rte_vxlan_hdr);
3993                 break;
3994         case RTE_FLOW_ITEM_TYPE_GRE:
3995         case RTE_FLOW_ITEM_TYPE_NVGRE:
3996                 retval = sizeof(struct rte_gre_hdr);
3997                 break;
3998         case RTE_FLOW_ITEM_TYPE_MPLS:
3999                 retval = sizeof(struct rte_mpls_hdr);
4000                 break;
4001         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4002         default:
4003                 retval = 0;
4004                 break;
4005         }
4006         return retval;
4007 }
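
/*
 * Editorial example: the total encap header length for a typical VXLAN
 * item chain, as accumulated by flow_dv_convert_encap_data() below,
 * must stay within MLX5_ENCAP_MAX_LEN:
 *
 * @code
 * size_t len = flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_ETH) +	// 14
 *	flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_IPV4) +		// 20
 *	flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_UDP) +		//  8
 *	flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_VXLAN);		//  8 -> 50
 * @endcode
 */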
4008
4009 #define MLX5_ENCAP_IPV4_VERSION         0x40
4010 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4011 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4012 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4013 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4014 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4015 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4016
4017 /**
4018  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
4019  *
4020  * @param[in] items
4021  *   Pointer to rte_flow_item objects list.
4022  * @param[out] buf
4023  *   Pointer to the output buffer.
4024  * @param[out] size
4025  *   Pointer to the output buffer size.
4026  * @param[out] error
4027  *   Pointer to the error structure.
4028  *
4029  * @return
4030  *   0 on success, a negative errno value otherwise and rte_errno is set.
4031  */
4032 static int
4033 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4034                            size_t *size, struct rte_flow_error *error)
4035 {
4036         struct rte_ether_hdr *eth = NULL;
4037         struct rte_vlan_hdr *vlan = NULL;
4038         struct rte_ipv4_hdr *ipv4 = NULL;
4039         struct rte_ipv6_hdr *ipv6 = NULL;
4040         struct rte_udp_hdr *udp = NULL;
4041         struct rte_vxlan_hdr *vxlan = NULL;
4042         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4043         struct rte_gre_hdr *gre = NULL;
4044         size_t len;
4045         size_t temp_size = 0;
4046
4047         if (!items)
4048                 return rte_flow_error_set(error, EINVAL,
4049                                           RTE_FLOW_ERROR_TYPE_ACTION,
4050                                           NULL, "invalid empty data");
4051         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4052                 len = flow_dv_get_item_hdr_len(items->type);
4053                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4054                         return rte_flow_error_set(error, EINVAL,
4055                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4056                                                   (void *)items->type,
4057                                                   "items total size is too big"
4058                                                   " for encap action");
4059                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4060                 switch (items->type) {
4061                 case RTE_FLOW_ITEM_TYPE_ETH:
4062                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4063                         break;
4064                 case RTE_FLOW_ITEM_TYPE_VLAN:
4065                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4066                         if (!eth)
4067                                 return rte_flow_error_set(error, EINVAL,
4068                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4069                                                 (void *)items->type,
4070                                                 "eth header not found");
4071                         if (!eth->ether_type)
4072                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4073                         break;
4074                 case RTE_FLOW_ITEM_TYPE_IPV4:
4075                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4076                         if (!vlan && !eth)
4077                                 return rte_flow_error_set(error, EINVAL,
4078                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4079                                                 (void *)items->type,
4080                                                 "neither eth nor vlan"
4081                                                 " header found");
4082                         if (vlan && !vlan->eth_proto)
4083                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4084                         else if (eth && !eth->ether_type)
4085                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4086                         if (!ipv4->version_ihl)
4087                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4088                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4089                         if (!ipv4->time_to_live)
4090                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4091                         break;
4092                 case RTE_FLOW_ITEM_TYPE_IPV6:
4093                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4094                         if (!vlan && !eth)
4095                                 return rte_flow_error_set(error, EINVAL,
4096                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4097                                                 (void *)items->type,
4098                                                 "neither eth nor vlan"
4099                                                 " header found");
4100                         if (vlan && !vlan->eth_proto)
4101                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4102                         else if (eth && !eth->ether_type)
4103                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4104                         if (!ipv6->vtc_flow)
4105                                 ipv6->vtc_flow =
4106                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4107                         if (!ipv6->hop_limits)
4108                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4109                         break;
4110                 case RTE_FLOW_ITEM_TYPE_UDP:
4111                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4112                         if (!ipv4 && !ipv6)
4113                                 return rte_flow_error_set(error, EINVAL,
4114                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4115                                                 (void *)items->type,
4116                                                 "ip header not found");
4117                         if (ipv4 && !ipv4->next_proto_id)
4118                                 ipv4->next_proto_id = IPPROTO_UDP;
4119                         else if (ipv6 && !ipv6->proto)
4120                                 ipv6->proto = IPPROTO_UDP;
4121                         break;
4122                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4123                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4124                         if (!udp)
4125                                 return rte_flow_error_set(error, EINVAL,
4126                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4127                                                 (void *)items->type,
4128                                                 "udp header not found");
4129                         if (!udp->dst_port)
4130                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4131                         if (!vxlan->vx_flags)
4132                                 vxlan->vx_flags =
4133                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4134                         break;
4135                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4136                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4137                         if (!udp)
4138                                 return rte_flow_error_set(error, EINVAL,
4139                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4140                                                 (void *)items->type,
4141                                                 "udp header not found");
4142                         if (!vxlan_gpe->proto)
4143                                 return rte_flow_error_set(error, EINVAL,
4144                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4145                                                 (void *)items->type,
4146                                                 "next protocol not found");
4147                         if (!udp->dst_port)
4148                                 udp->dst_port =
4149                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4150                         if (!vxlan_gpe->vx_flags)
4151                                 vxlan_gpe->vx_flags =
4152                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4153                         break;
4154                 case RTE_FLOW_ITEM_TYPE_GRE:
4155                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4156                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4157                         if (!gre->proto)
4158                                 return rte_flow_error_set(error, EINVAL,
4159                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4160                                                 (void *)items->type,
4161                                                 "next protocol not found");
4162                         if (!ipv4 && !ipv6)
4163                                 return rte_flow_error_set(error, EINVAL,
4164                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4165                                                 (void *)items->type,
4166                                                 "ip header not found");
4167                         if (ipv4 && !ipv4->next_proto_id)
4168                                 ipv4->next_proto_id = IPPROTO_GRE;
4169                         else if (ipv6 && !ipv6->proto)
4170                                 ipv6->proto = IPPROTO_GRE;
4171                         break;
4172                 case RTE_FLOW_ITEM_TYPE_VOID:
4173                         break;
4174                 default:
4175                         return rte_flow_error_set(error, EINVAL,
4176                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4177                                                   (void *)items->type,
4178                                                   "unsupported item type");
4179                         break;
4180                 }
4181                 temp_size += len;
4182         }
4183         *size = temp_size;
4184         return 0;
4185 }
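
/*
 * Editorial illustration: a minimal VXLAN encap definition accepted by
 * the converter above. Zeroed spec fields are filled in-place with the
 * defaults: eth.ether_type, ipv4 version/TTL/protocol, udp.dst_port
 * (4789) and vxlan.vx_flags. The specs are hypothetical placeholders.
 *
 * @code
 * struct rte_flow_item_eth eth = { 0 };
 * struct rte_flow_item_ipv4 ip = { 0 };
 * struct rte_flow_item_udp udp = { 0 };
 * struct rte_flow_item_vxlan vxlan = { 0 };
 * const struct rte_flow_item defs[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip },
 *	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END, },
 * };
 * @endcode
 */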
4186
4187 static int
4188 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4189 {
4190         struct rte_ether_hdr *eth = NULL;
4191         struct rte_vlan_hdr *vlan = NULL;
4192         struct rte_ipv6_hdr *ipv6 = NULL;
4193         struct rte_udp_hdr *udp = NULL;
4194         char *next_hdr;
4195         uint16_t proto;
4196
4197         eth = (struct rte_ether_hdr *)data;
4198         next_hdr = (char *)(eth + 1);
4199         proto = RTE_BE16(eth->ether_type);
4200
4201         /* VLAN skipping */
4202         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4203                 vlan = (struct rte_vlan_hdr *)next_hdr;
4204                 proto = RTE_BE16(vlan->eth_proto);
4205                 next_hdr += sizeof(struct rte_vlan_hdr);
4206         }
4207
4208         /* HW calculates the IPv4 checksum, no need to proceed. */
4209         if (proto == RTE_ETHER_TYPE_IPV4)
4210                 return 0;
4211
4212         /* Non-IPv4/IPv6 header, not supported. */
4213         if (proto != RTE_ETHER_TYPE_IPV6) {
4214                 return rte_flow_error_set(error, ENOTSUP,
4215                                           RTE_FLOW_ERROR_TYPE_ACTION,
4216                                           NULL, "Cannot offload non IPv4/IPv6");
4217         }
4218
4219         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4220
4221         /* Ignore non-UDP packets. */
4222         if (ipv6->proto != IPPROTO_UDP)
4223                 return 0;
4224
4225         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4226         udp->dgram_cksum = 0;
4227
4228         return 0;
4229 }
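
/*
 * Editorial note: zeroing the inner UDP checksum above relies on the
 * zero-checksum allowance for UDP tunnels over IPv6 (RFC 6935); the
 * hardware fills the IPv4 header checksum by itself but does not
 * compute a UDP checksum for an encapsulating IPv6 header.
 */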
4230
4231 /**
4232  * Convert L2 encap action to DV specification.
4233  *
4234  * @param[in] dev
4235  *   Pointer to rte_eth_dev structure.
4236  * @param[in] action
4237  *   Pointer to action structure.
4238  * @param[in, out] dev_flow
4239  *   Pointer to the mlx5_flow.
4240  * @param[in] transfer
4241  *   Mark if the flow is E-Switch flow.
4242  * @param[out] error
4243  *   Pointer to the error structure.
4244  *
4245  * @return
4246  *   0 on success, a negative errno value otherwise and rte_errno is set.
4247  */
4248 static int
4249 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4250                                const struct rte_flow_action *action,
4251                                struct mlx5_flow *dev_flow,
4252                                uint8_t transfer,
4253                                struct rte_flow_error *error)
4254 {
4255         const struct rte_flow_item *encap_data;
4256         const struct rte_flow_action_raw_encap *raw_encap_data;
4257         struct mlx5_flow_dv_encap_decap_resource res = {
4258                 .reformat_type =
4259                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4260                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4261                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4262         };
4263
4264         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4265                 raw_encap_data =
4266                         (const struct rte_flow_action_raw_encap *)action->conf;
4267                 res.size = raw_encap_data->size;
4268                 memcpy(res.buf, raw_encap_data->data, res.size);
4269         } else {
4270                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4271                         encap_data =
4272                                 ((const struct rte_flow_action_vxlan_encap *)
4273                                                 action->conf)->definition;
4274                 else
4275                         encap_data =
4276                                 ((const struct rte_flow_action_nvgre_encap *)
4277                                                 action->conf)->definition;
4278                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4279                                                &res.size, error))
4280                         return -rte_errno;
4281         }
4282         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4283                 return -rte_errno;
4284         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4285                 return rte_flow_error_set(error, EINVAL,
4286                                           RTE_FLOW_ERROR_TYPE_ACTION,
4287                                           NULL, "can't create L2 encap action");
4288         return 0;
4289 }
4290
4291 /**
4292  * Convert L2 decap action to DV specification.
4293  *
4294  * @param[in] dev
4295  *   Pointer to rte_eth_dev structure.
4296  * @param[in, out] dev_flow
4297  *   Pointer to the mlx5_flow.
4298  * @param[in] transfer
4299  *   Mark if the flow is E-Switch flow.
4300  * @param[out] error
4301  *   Pointer to the error structure.
4302  *
4303  * @return
4304  *   0 on success, a negative errno value otherwise and rte_errno is set.
4305  */
4306 static int
4307 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4308                                struct mlx5_flow *dev_flow,
4309                                uint8_t transfer,
4310                                struct rte_flow_error *error)
4311 {
4312         struct mlx5_flow_dv_encap_decap_resource res = {
4313                 .size = 0,
4314                 .reformat_type =
4315                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4316                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4317                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4318         };
4319
4320         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4321                 return rte_flow_error_set(error, EINVAL,
4322                                           RTE_FLOW_ERROR_TYPE_ACTION,
4323                                           NULL, "can't create L2 decap action");
4324         return 0;
4325 }
4326
4327 /**
4328  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4329  *
4330  * @param[in] dev
4331  *   Pointer to rte_eth_dev structure.
4332  * @param[in] action
4333  *   Pointer to action structure.
4334  * @param[in, out] dev_flow
4335  *   Pointer to the mlx5_flow.
4336  * @param[in] attr
4337  *   Pointer to the flow attributes.
4338  * @param[out] error
4339  *   Pointer to the error structure.
4340  *
4341  * @return
4342  *   0 on success, a negative errno value otherwise and rte_errno is set.
4343  */
4344 static int
4345 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4346                                 const struct rte_flow_action *action,
4347                                 struct mlx5_flow *dev_flow,
4348                                 const struct rte_flow_attr *attr,
4349                                 struct rte_flow_error *error)
4350 {
4351         const struct rte_flow_action_raw_encap *encap_data;
4352         struct mlx5_flow_dv_encap_decap_resource res;
4353
4354         memset(&res, 0, sizeof(res));
4355         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4356         res.size = encap_data->size;
4357         memcpy(res.buf, encap_data->data, res.size);
4358         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4359                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4360                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4361         if (attr->transfer)
4362                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4363         else
4364                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4365                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4366         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4367                 return rte_flow_error_set(error, EINVAL,
4368                                           RTE_FLOW_ERROR_TYPE_ACTION,
4369                                           NULL, "can't create encap action");
4370         return 0;
4371 }
4372
4373 /**
4374  * Create action push VLAN.
4375  *
4376  * @param[in] dev
4377  *   Pointer to rte_eth_dev structure.
4378  * @param[in] attr
4379  *   Pointer to the flow attributes.
4380  * @param[in] vlan
4381  *   Pointer to the vlan to push to the Ethernet header.
4382  * @param[in, out] dev_flow
4383  *   Pointer to the mlx5_flow.
4384  * @param[out] error
4385  *   Pointer to the error structure.
4386  *
4387  * @return
4388  *   0 on success, a negative errno value otherwise and rte_errno is set.
4389  */
4390 static int
4391 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4392                                 const struct rte_flow_attr *attr,
4393                                 const struct rte_vlan_hdr *vlan,
4394                                 struct mlx5_flow *dev_flow,
4395                                 struct rte_flow_error *error)
4396 {
4397         struct mlx5_flow_dv_push_vlan_action_resource res;
4398
4399         memset(&res, 0, sizeof(res));
4400         res.vlan_tag =
4401                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4402                                  vlan->vlan_tci);
4403         if (attr->transfer)
4404                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4405         else
4406                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4407                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4408         return flow_dv_push_vlan_action_resource_register
4409                                             (dev, &res, dev_flow, error);
4410 }
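
/*
 * Editorial worked example of the vlan_tag encoding above: for TPID
 * 0x8100, PCP 3 and VID 100 the TCI is (3 << 13) | 100 = 0x6064, so
 * vlan_tag = rte_cpu_to_be_32(0x8100 << 16 | 0x6064), i.e. the 32-bit
 * big-endian value 0x81006064 inserted into the Ethernet header.
 */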
4411
4412 /**
4413  * Validate the modify-header actions.
4414  *
4415  * @param[in] action_flags
4416  *   Holds the actions detected until now.
4417  * @param[in] action
4418  *   Pointer to the modify action.
4419  * @param[out] error
4420  *   Pointer to error structure.
4421  *
4422  * @return
4423  *   0 on success, a negative errno value otherwise and rte_errno is set.
4424  */
4425 static int
4426 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4427                                    const struct rte_flow_action *action,
4428                                    struct rte_flow_error *error)
4429 {
4430         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4431                 return rte_flow_error_set(error, EINVAL,
4432                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4433                                           NULL, "action configuration not set");
4434         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4435                 return rte_flow_error_set(error, EINVAL,
4436                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4437                                           "can't have encap action before"
4438                                           " modify action");
4439         return 0;
4440 }
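
/*
 * Editorial note: header modification is applied before encapsulation,
 * so the check above rejects a hypothetical sequence such as:
 *
 * @code
 * // VXLAN_ENCAP followed by SET_IPV4_SRC -> EINVAL from the check above.
 * const struct rte_flow_action bad[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, .conf = &encap_conf },
 *	{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC, .conf = &set_conf },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END, },
 * };
 * @endcode
 */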
4441
4442 /**
4443  * Validate the modify-header MAC address actions.
4444  *
4445  * @param[in] action_flags
4446  *   Holds the actions detected until now.
4447  * @param[in] action
4448  *   Pointer to the modify action.
4449  * @param[in] item_flags
4450  *   Holds the items detected.
4451  * @param[out] error
4452  *   Pointer to error structure.
4453  *
4454  * @return
4455  *   0 on success, a negative errno value otherwise and rte_errno is set.
4456  */
4457 static int
4458 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4459                                    const struct rte_flow_action *action,
4460                                    const uint64_t item_flags,
4461                                    struct rte_flow_error *error)
4462 {
4463         int ret = 0;
4464
4465         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4466         if (!ret) {
4467                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4468                         return rte_flow_error_set(error, EINVAL,
4469                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4470                                                   NULL,
4471                                                   "no L2 item in pattern");
4472         }
4473         return ret;
4474 }
4475
4476 /**
4477  * Validate the modify-header IPv4 address actions.
4478  *
4479  * @param[in] action_flags
4480  *   Holds the actions detected until now.
4481  * @param[in] action
4482  *   Pointer to the modify action.
4483  * @param[in] item_flags
4484  *   Holds the items detected.
4485  * @param[out] error
4486  *   Pointer to error structure.
4487  *
4488  * @return
4489  *   0 on success, a negative errno value otherwise and rte_errno is set.
4490  */
4491 static int
4492 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4493                                     const struct rte_flow_action *action,
4494                                     const uint64_t item_flags,
4495                                     struct rte_flow_error *error)
4496 {
4497         int ret = 0;
4498         uint64_t layer;
4499
4500         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4501         if (!ret) {
4502                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4503                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4504                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4505                 if (!(item_flags & layer))
4506                         return rte_flow_error_set(error, EINVAL,
4507                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4508                                                   NULL,
4509                                                   "no ipv4 item in pattern");
4510         }
4511         return ret;
4512 }
4513
4514 /**
4515  * Validate the modify-header IPv6 address actions.
4516  *
4517  * @param[in] action_flags
4518  *   Holds the actions detected until now.
4519  * @param[in] action
4520  *   Pointer to the modify action.
4521  * @param[in] item_flags
4522  *   Holds the items detected.
4523  * @param[out] error
4524  *   Pointer to error structure.
4525  *
4526  * @return
4527  *   0 on success, a negative errno value otherwise and rte_errno is set.
4528  */
4529 static int
4530 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4531                                     const struct rte_flow_action *action,
4532                                     const uint64_t item_flags,
4533                                     struct rte_flow_error *error)
4534 {
4535         int ret = 0;
4536         uint64_t layer;
4537
4538         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4539         if (!ret) {
4540                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4541                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4542                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4543                 if (!(item_flags & layer))
4544                         return rte_flow_error_set(error, EINVAL,
4545                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4546                                                   NULL,
4547                                                   "no ipv6 item in pattern");
4548         }
4549         return ret;
4550 }
4551
4552 /**
4553  * Validate the modify-header TP actions.
4554  *
4555  * @param[in] action_flags
4556  *   Holds the actions detected until now.
4557  * @param[in] action
4558  *   Pointer to the modify action.
4559  * @param[in] item_flags
4560  *   Holds the items detected.
4561  * @param[out] error
4562  *   Pointer to error structure.
4563  *
4564  * @return
4565  *   0 on success, a negative errno value otherwise and rte_errno is set.
4566  */
4567 static int
4568 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4569                                   const struct rte_flow_action *action,
4570                                   const uint64_t item_flags,
4571                                   struct rte_flow_error *error)
4572 {
4573         int ret = 0;
4574         uint64_t layer;
4575
4576         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4577         if (!ret) {
4578                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4579                                  MLX5_FLOW_LAYER_INNER_L4 :
4580                                  MLX5_FLOW_LAYER_OUTER_L4;
4581                 if (!(item_flags & layer))
4582                         return rte_flow_error_set(error, EINVAL,
4583                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4584                                                   NULL, "no transport layer "
4585                                                   "in pattern");
4586         }
4587         return ret;
4588 }
4589
4590 /**
4591  * Validate the modify-header actions of increment/decrement
4592  * TCP Sequence-number.
4593  *
4594  * @param[in] action_flags
4595  *   Holds the actions detected until now.
4596  * @param[in] action
4597  *   Pointer to the modify action.
4598  * @param[in] item_flags
4599  *   Holds the items detected.
4600  * @param[out] error
4601  *   Pointer to error structure.
4602  *
4603  * @return
4604  *   0 on success, a negative errno value otherwise and rte_errno is set.
4605  */
4606 static int
4607 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4608                                        const struct rte_flow_action *action,
4609                                        const uint64_t item_flags,
4610                                        struct rte_flow_error *error)
4611 {
4612         int ret = 0;
4613         uint64_t layer;
4614
4615         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4616         if (!ret) {
4617                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4618                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4619                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4620                 if (!(item_flags & layer))
4621                         return rte_flow_error_set(error, EINVAL,
4622                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4623                                                   NULL, "no TCP item in"
4624                                                   " pattern");
4625                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4626                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4627                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4628                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4629                         return rte_flow_error_set(error, EINVAL,
4630                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4631                                                   NULL,
4632                                                   "cannot decrease and increase"
4633                                                   " TCP sequence number"
4634                                                   " at the same time");
4635         }
4636         return ret;
4637 }
4638
4639 /**
4640  * Validate the modify-header actions of increment/decrement
4641  * TCP Acknowledgment number.
4642  *
4643  * @param[in] action_flags
4644  *   Holds the actions detected until now.
4645  * @param[in] action
4646  *   Pointer to the modify action.
4647  * @param[in] item_flags
4648  *   Holds the items detected.
4649  * @param[out] error
4650  *   Pointer to error structure.
4651  *
4652  * @return
4653  *   0 on success, a negative errno value otherwise and rte_errno is set.
4654  */
4655 static int
4656 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4657                                        const struct rte_flow_action *action,
4658                                        const uint64_t item_flags,
4659                                        struct rte_flow_error *error)
4660 {
4661         int ret = 0;
4662         uint64_t layer;
4663
4664         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4665         if (!ret) {
4666                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4667                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4668                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4669                 if (!(item_flags & layer))
4670                         return rte_flow_error_set(error, EINVAL,
4671                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4672                                                   NULL, "no TCP item in"
4673                                                   " pattern");
4674                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4675                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4676                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4677                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4678                         return rte_flow_error_set(error, EINVAL,
4679                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4680                                                   NULL,
4681                                                   "cannot decrease and increase"
4682                                                   " TCP acknowledgment number"
4683                                                   " at the same time");
4684         }
4685         return ret;
4686 }
4687
4688 /**
4689  * Validate the modify-header TTL actions.
4690  *
4691  * @param[in] action_flags
4692  *   Holds the actions detected until now.
4693  * @param[in] action
4694  *   Pointer to the modify action.
4695  * @param[in] item_flags
4696  *   Holds the items detected.
4697  * @param[out] error
4698  *   Pointer to error structure.
4699  *
4700  * @return
4701  *   0 on success, a negative errno value otherwise and rte_errno is set.
4702  */
4703 static int
4704 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4705                                    const struct rte_flow_action *action,
4706                                    const uint64_t item_flags,
4707                                    struct rte_flow_error *error)
4708 {
4709         int ret = 0;
4710         uint64_t layer;
4711
4712         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4713         if (!ret) {
4714                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4715                                  MLX5_FLOW_LAYER_INNER_L3 :
4716                                  MLX5_FLOW_LAYER_OUTER_L3;
4717                 if (!(item_flags & layer))
4718                         return rte_flow_error_set(error, EINVAL,
4719                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4720                                                   NULL,
4721                                                   "no IP protocol in pattern");
4722         }
4723         return ret;
4724 }
4725
4726 /**
4727  * Validate the generic modify field actions.
4728  * Validate the generic modify field actions.
 *
4729  *   Pointer to the rte_eth_dev structure.
4730  * @param[in] action_flags
4731  *   Holds the actions detected until now.
4732  * @param[in] action
4733  *   Pointer to the modify action.
4734  * @param[in] attr
4735  *   Pointer to the flow attributes.
4736  * @param[out] error
4737  *   Pointer to error structure.
4738  *
4739  * @return
4740  *   Number of header fields to modify (0 or more) on success,
4741  *   a negative errno value otherwise and rte_errno is set.
4742  */
4743 static int
4744 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4745                                    const uint64_t action_flags,
4746                                    const struct rte_flow_action *action,
4747                                    const struct rte_flow_attr *attr,
4748                                    struct rte_flow_error *error)
4749 {
4750         int ret = 0;
4751         struct mlx5_priv *priv = dev->data->dev_private;
4752         struct mlx5_dev_config *config = &priv->config;
4753         const struct rte_flow_action_modify_field *action_modify_field =
4754                 action->conf;
4755         uint32_t dst_width = mlx5_flow_item_field_width(config,
4756                                 action_modify_field->dst.field);
4757         uint32_t src_width = mlx5_flow_item_field_width(config,
4758                                 action_modify_field->src.field);
4759
4760         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4761         if (ret)
4762                 return ret;
4763
4764         if (action_modify_field->width == 0)
4765                 return rte_flow_error_set(error, EINVAL,
4766                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4767                                 "no bits are requested to be modified");
4768         else if (action_modify_field->width > dst_width ||
4769                  action_modify_field->width > src_width)
4770                 return rte_flow_error_set(error, EINVAL,
4771                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4772                                 "cannot modify more bits than"
4773                                 " the width of a field");
4774         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4775             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4776                 if ((action_modify_field->dst.offset +
4777                      action_modify_field->width > dst_width) ||
4778                     (action_modify_field->dst.offset % 32))
4779                         return rte_flow_error_set(error, EINVAL,
4780                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4781                                         "destination offset is too big"
4782                                         " or not aligned to 4 bytes");
4783                 if (action_modify_field->dst.level &&
4784                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4785                         return rte_flow_error_set(error, ENOTSUP,
4786                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4787                                         "inner header fields modification"
4788                                         " is not supported");
4789         }
4790         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4791             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4792                 if (!attr->transfer && !attr->group)
4793                         return rte_flow_error_set(error, ENOTSUP,
4794                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4795                                         "modify field action is not"
4796                                         " supported for group 0");
4797                 if ((action_modify_field->src.offset +
4798                      action_modify_field->width > src_width) ||
4799                     (action_modify_field->src.offset % 32))
4800                         return rte_flow_error_set(error, EINVAL,
4801                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4802                                         "source offset is too big"
4803                                         " or not aligned to 4 bytes");
4804                 if (action_modify_field->src.level &&
4805                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4806                         return rte_flow_error_set(error, ENOTSUP,
4807                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4808                                         "inner header fields modification"
4809                                         " is not supported");
4810         }
4811         if ((action_modify_field->dst.field ==
4812              action_modify_field->src.field) &&
4813             (action_modify_field->dst.level ==
4814              action_modify_field->src.level))
4815                 return rte_flow_error_set(error, EINVAL,
4816                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4817                                 "source and destination fields"
4818                                 " cannot be the same");
4819         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4820             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4821                 return rte_flow_error_set(error, EINVAL,
4822                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4823                                 "immediate value or a pointer to it"
4824                                 " cannot be used as a destination");
4825         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4826             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4827                 return rte_flow_error_set(error, ENOTSUP,
4828                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4829                                 "modification of an arbitrary"
4830                                 " place in a packet is not supported");
4831         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4832             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4833                 return rte_flow_error_set(error, ENOTSUP,
4834                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4835                                 "modification of the 802.1Q Tag"
4836                                 " Identifier is not supported");
4837         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4838             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4839                 return rte_flow_error_set(error, ENOTSUP,
4840                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4841                                 "modification of the VXLAN Network"
4842                                 " Identifier is not supported");
4843         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4844             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4845                 return rte_flow_error_set(error, ENOTSUP,
4846                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4847                                 "modification of the GENEVE Network"
4848                                 " Identifier is not supported");
4849         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4850             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4851             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4852             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4853                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4854                     !mlx5_flow_ext_mreg_supported(dev))
4855                         return rte_flow_error_set(error, ENOTSUP,
4856                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4857                                         "cannot modify mark or metadata without"
4858                                         " extended metadata register support");
4859         }
4860         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4861                 return rte_flow_error_set(error, ENOTSUP,
4862                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4863                                 "add and sub operations"
4864                                 " are not supported");
4865         return (action_modify_field->width / 32) +
4866                !!(action_modify_field->width % 32);
4867 }
4868
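/*
 * Editor's note: a minimal sketch (hypothetical helper, not part of the
 * driver) of the return value computed by the validator above -- it reports
 * how many 32-bit modify-header actions a field of `width` bits consumes,
 * i.e. one action per started 32-bit word.
 */
static inline unsigned int
example_modify_field_dwords(unsigned int width)
{
        /* Equivalent to ceil(width / 32) for width > 0. */
        return (width / 32) + !!(width % 32);
}
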
4869 /**
4870  * Validate jump action.
 4871  *
      * @param[in] dev
      *   Pointer to the rte_eth_dev structure.
      * @param[in] tunnel
      *   Pointer to the tunnel offload data, or NULL if no tunnel offload.
 4872  * @param[in] action
 4873  *   Pointer to the jump action.
4874  * @param[in] action_flags
4875  *   Holds the actions detected until now.
4876  * @param[in] attributes
4877  *   Pointer to flow attributes
4878  * @param[in] external
4879  *   Action belongs to flow rule created by request external to PMD.
4880  * @param[out] error
4881  *   Pointer to error structure.
4882  *
4883  * @return
4884  *   0 on success, a negative errno value otherwise and rte_errno is set.
4885  */
4886 static int
4887 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4888                              const struct mlx5_flow_tunnel *tunnel,
4889                              const struct rte_flow_action *action,
4890                              uint64_t action_flags,
4891                              const struct rte_flow_attr *attributes,
4892                              bool external, struct rte_flow_error *error)
4893 {
4894         uint32_t target_group, table;
4895         int ret = 0;
4896         struct flow_grp_info grp_info = {
4897                 .external = !!external,
4898                 .transfer = !!attributes->transfer,
4899                 .fdb_def_rule = 1,
4900                 .std_tbl_fix = 0
4901         };
4902         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4903                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4904                 return rte_flow_error_set(error, EINVAL,
4905                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4906                                           "can't have 2 fate actions in"
4907                                           " the same flow");
4908         if (!action->conf)
4909                 return rte_flow_error_set(error, EINVAL,
4910                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4911                                           NULL, "action configuration not set");
4912         target_group =
4913                 ((const struct rte_flow_action_jump *)action->conf)->group;
4914         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4915                                        &grp_info, error);
4916         if (ret)
4917                 return ret;
4918         if (attributes->group == target_group &&
4919             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4920                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4921                 return rte_flow_error_set(error, EINVAL,
4922                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4923                                           "target group must be other than"
4924                                           " the current flow group");
4925         return 0;
4926 }
4927
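/*
 * Editor's sketch (hypothetical helper, not part of the driver): builds an
 * action list with a JUMP that the validator above accepts -- the target
 * group must differ from the group the rule is created in, and the action
 * configuration must not be NULL.
 */
static inline void
example_fill_jump_actions(struct rte_flow_action actions[2],
                          struct rte_flow_action_jump *jump,
                          uint32_t target_group)
{
        jump->group = target_group; /* must differ from attr->group */
        actions[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_JUMP,
                .conf = jump,
        };
        actions[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_END,
        };
}
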
4928 /**
4929  * Validate the port_id action.
4930  *
4931  * @param[in] dev
4932  *   Pointer to rte_eth_dev structure.
4933  * @param[in] action_flags
4934  *   Bit-fields that holds the actions detected until now.
4935  * @param[in] action
4936  *   Port_id RTE action structure.
4937  * @param[in] attr
4938  *   Attributes of flow that includes this action.
4939  * @param[out] error
4940  *   Pointer to error structure.
4941  *
4942  * @return
4943  *   0 on success, a negative errno value otherwise and rte_errno is set.
4944  */
4945 static int
4946 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4947                                 uint64_t action_flags,
4948                                 const struct rte_flow_action *action,
4949                                 const struct rte_flow_attr *attr,
4950                                 struct rte_flow_error *error)
4951 {
4952         const struct rte_flow_action_port_id *port_id;
4953         struct mlx5_priv *act_priv;
4954         struct mlx5_priv *dev_priv;
4955         uint16_t port;
4956
4957         if (!attr->transfer)
4958                 return rte_flow_error_set(error, ENOTSUP,
4959                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4960                                           NULL,
4961                                           "port id action is valid in transfer"
4962                                           " mode only");
4963         if (!action || !action->conf)
4964                 return rte_flow_error_set(error, ENOTSUP,
4965                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4966                                           NULL,
4967                                           "port id action parameters must be"
4968                                           " specified");
4969         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4970                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4971                 return rte_flow_error_set(error, EINVAL,
4972                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4973                                           "can have only one fate action in"
4974                                           " a flow");
4975         dev_priv = mlx5_dev_to_eswitch_info(dev);
4976         if (!dev_priv)
4977                 return rte_flow_error_set(error, rte_errno,
4978                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4979                                           NULL,
4980                                           "failed to obtain E-Switch info");
4981         port_id = action->conf;
4982         port = port_id->original ? dev->data->port_id : port_id->id;
4983         act_priv = mlx5_port_to_eswitch_info(port, false);
4984         if (!act_priv)
4985                 return rte_flow_error_set
4986                                 (error, rte_errno,
4987                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4988                                  "failed to obtain E-Switch port id for port");
4989         if (act_priv->domain_id != dev_priv->domain_id)
4990                 return rte_flow_error_set
4991                                 (error, EINVAL,
4992                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4993                                  "port does not belong to"
4994                                  " E-Switch being configured");
4995         return 0;
4996 }
4997
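/*
 * Editor's sketch (hypothetical, not part of the driver): a PORT_ID action
 * configuration that passes the checks above. The rule must be created with
 * attr->transfer = 1, and the target port must belong to the same E-Switch
 * domain as the port the rule is created on.
 */
static inline void
example_fill_port_id_conf(struct rte_flow_action_port_id *conf,
                          uint16_t dst_port_id)
{
        conf->original = 0;     /* redirect to ->id, not the original port */
        conf->id = dst_port_id;
}
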
4998 /**
4999  * Get the maximum number of modify header actions.
5000  *
5001  * @param dev
5002  *   Pointer to rte_eth_dev structure.
5003  * @param flags
5004  *   Flags bits to check if root level.
5005  *
5006  * @return
5007  *   Max number of modify header actions device can support.
5008  */
5009 static inline unsigned int
5010 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5011                               uint64_t flags)
5012 {
5013         /*
5014          * There's no way to directly query the max capacity from FW.
5015          * The maximal value on root table should be assumed to be supported.
5016          */
5017         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
5018                 return MLX5_MAX_MODIFY_NUM;
5019         else
5020                 return MLX5_ROOT_TBL_MODIFY_NUM;
5021 }
5022
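/*
 * Editor's illustration (hypothetical helper): the intended use of the
 * limit above -- a modify-header resource fits only if its action count
 * stays within the per-table maximum, which is smaller on the root table.
 * The register function further below performs exactly this check.
 */
static inline bool
example_modify_hdr_fits(struct rte_eth_dev *dev, uint64_t flags,
                        unsigned int actions_num)
{
        return actions_num <= flow_dv_modify_hdr_action_max(dev, flags);
}
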
5023 /**
5024  * Validate the meter action.
5025  *
5026  * @param[in] dev
5027  *   Pointer to rte_eth_dev structure.
5028  * @param[in] action_flags
5029  *   Bit-fields that holds the actions detected until now.
5030  * @param[in] action
5031  *   Pointer to the meter action.
5032  * @param[in] attr
 5033  *   Attributes of flow that includes this action.
      * @param[out] def_policy
      *   Whether the meter uses the default policy.
 5034  * @param[out] error
5035  *   Pointer to error structure.
5036  *
5037  * @return
 5038  *   0 on success, a negative errno value otherwise and rte_errno is set.
5039  */
5040 static int
5041 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5042                                 uint64_t action_flags,
5043                                 const struct rte_flow_action *action,
5044                                 const struct rte_flow_attr *attr,
5045                                 bool *def_policy,
5046                                 struct rte_flow_error *error)
5047 {
5048         struct mlx5_priv *priv = dev->data->dev_private;
5049         const struct rte_flow_action_meter *am = action->conf;
5050         struct mlx5_flow_meter_info *fm;
5051         struct mlx5_flow_meter_policy *mtr_policy;
5052         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5053
5054         if (!am)
5055                 return rte_flow_error_set(error, EINVAL,
5056                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5057                                           "meter action conf is NULL");
5058
5059         if (action_flags & MLX5_FLOW_ACTION_METER)
5060                 return rte_flow_error_set(error, ENOTSUP,
5061                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5062                                           "meter chaining not supported");
5063         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5064                 return rte_flow_error_set(error, ENOTSUP,
5065                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5066                                           "meter with jump not supported");
5067         if (!priv->mtr_en)
5068                 return rte_flow_error_set(error, ENOTSUP,
5069                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5070                                           NULL,
5071                                           "meter action not supported");
5072         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5073         if (!fm)
5074                 return rte_flow_error_set(error, EINVAL,
5075                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5076                                           "Meter not found");
5077         /* ASO meter can always be shared by different domains. */
5078         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5079             !(fm->transfer == attr->transfer ||
5080               (!fm->ingress && !attr->ingress && attr->egress) ||
5081               (!fm->egress && !attr->egress && attr->ingress)))
5082                 return rte_flow_error_set(error, EINVAL,
5083                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5084                         "Flow attributes domain is either invalid "
5085                         "or has a domain conflict with the current "
5086                         "meter attributes");
5087         if (fm->def_policy) {
5088                 if (!((attr->transfer &&
5089                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5090                         (attr->egress &&
5091                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5092                         (attr->ingress &&
5093                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5094                         return rte_flow_error_set(error, EINVAL,
5095                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5096                                           "Flow attributes domain "
5097                                           "has a conflict with the current "
5098                                           "meter domain attributes");
5099                 *def_policy = true;
5100         } else {
5101                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5102                                                 fm->policy_id, NULL);
5103                 if (!mtr_policy)
5104                         return rte_flow_error_set(error, EINVAL,
5105                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5106                                           "Invalid policy ID for meter");
5107                 if (!((attr->transfer && mtr_policy->transfer) ||
5108                         (attr->egress && mtr_policy->egress) ||
5109                         (attr->ingress && mtr_policy->ingress)))
5110                         return rte_flow_error_set(error, EINVAL,
5111                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5112                                           "Flow attributes domain "
5113                                           "has a conflict with the current "
5114                                           "meter domain attributes");
5115                 *def_policy = false;
5116         }
5117         return 0;
5118 }
5119
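/*
 * Editor's sketch (hypothetical, not part of the driver): a METER action
 * the validator above can accept. The meter object must already exist
 * (created via the rte_mtr API) and, unless ASO meters are enabled, its
 * domain attributes must not conflict with the flow attributes.
 */
static inline void
example_fill_meter_action(struct rte_flow_action *action,
                          struct rte_flow_action_meter *conf,
                          uint32_t mtr_id)
{
        conf->mtr_id = mtr_id;  /* ID passed earlier to rte_mtr_create(). */
        action->type = RTE_FLOW_ACTION_TYPE_METER;
        action->conf = conf;
}
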
5120 /**
5121  * Validate the age action.
5122  *
5123  * @param[in] action_flags
5124  *   Holds the actions detected until now.
5125  * @param[in] action
5126  *   Pointer to the age action.
5127  * @param[in] dev
5128  *   Pointer to the Ethernet device structure.
5129  * @param[out] error
5130  *   Pointer to error structure.
5131  *
5132  * @return
5133  *   0 on success, a negative errno value otherwise and rte_errno is set.
5134  */
5135 static int
5136 flow_dv_validate_action_age(uint64_t action_flags,
5137                             const struct rte_flow_action *action,
5138                             struct rte_eth_dev *dev,
5139                             struct rte_flow_error *error)
5140 {
5141         struct mlx5_priv *priv = dev->data->dev_private;
5142         const struct rte_flow_action_age *age = action->conf;
5143
5144         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5145             !priv->sh->aso_age_mng))
5146                 return rte_flow_error_set(error, ENOTSUP,
5147                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5148                                           NULL,
5149                                           "age action not supported");
5150         if (!(action->conf))
5151                 return rte_flow_error_set(error, EINVAL,
5152                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5153                                           "configuration cannot be NULL");
5154         if (!(age->timeout))
5155                 return rte_flow_error_set(error, EINVAL,
5156                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5157                                           "invalid timeout value 0");
5158         if (action_flags & MLX5_FLOW_ACTION_AGE)
5159                 return rte_flow_error_set(error, EINVAL,
5160                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5161                                           "duplicate age actions set");
5162         return 0;
5163 }
5164
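/*
 * Editor's sketch (hypothetical, not part of the driver): an AGE action
 * configuration that satisfies the validator above -- a non-zero timeout
 * (in seconds) and at most one AGE action per flow.
 */
static inline void
example_fill_age_conf(struct rte_flow_action_age *conf, uint32_t seconds,
                      void *user_ctx)
{
        conf->timeout = seconds;        /* 0 is rejected above */
        conf->context = user_ctx;       /* returned by aged-flow queries */
}
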
5165 /**
5166  * Validate the modify-header IPv4 DSCP actions.
5167  *
5168  * @param[in] action_flags
5169  *   Holds the actions detected until now.
5170  * @param[in] action
5171  *   Pointer to the modify action.
5172  * @param[in] item_flags
5173  *   Holds the items detected.
5174  * @param[out] error
5175  *   Pointer to error structure.
5176  *
5177  * @return
5178  *   0 on success, a negative errno value otherwise and rte_errno is set.
5179  */
5180 static int
5181 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5182                                          const struct rte_flow_action *action,
5183                                          const uint64_t item_flags,
5184                                          struct rte_flow_error *error)
5185 {
5186         int ret = 0;
5187
5188         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5189         if (!ret) {
5190                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5191                         return rte_flow_error_set(error, EINVAL,
5192                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5193                                                   NULL,
5194                                                   "no ipv4 item in pattern");
5195         }
5196         return ret;
5197 }
5198
5199 /**
5200  * Validate the modify-header IPv6 DSCP actions.
5201  *
5202  * @param[in] action_flags
5203  *   Holds the actions detected until now.
5204  * @param[in] action
5205  *   Pointer to the modify action.
5206  * @param[in] item_flags
5207  *   Holds the items detected.
5208  * @param[out] error
5209  *   Pointer to error structure.
5210  *
5211  * @return
5212  *   0 on success, a negative errno value otherwise and rte_errno is set.
5213  */
5214 static int
5215 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5216                                          const struct rte_flow_action *action,
5217                                          const uint64_t item_flags,
5218                                          struct rte_flow_error *error)
5219 {
5220         int ret = 0;
5221
5222         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5223         if (!ret) {
5224                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5225                         return rte_flow_error_set(error, EINVAL,
5226                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5227                                                   NULL,
5228                                                   "no ipv6 item in pattern");
5229         }
5230         return ret;
5231 }
5232
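/*
 * Editor's sketch (hypothetical, not part of the driver): a DSCP rewrite
 * action matching the two validators above. The flow pattern must contain
 * the corresponding IPv4 or IPv6 item, otherwise validation fails.
 */
static inline void
example_fill_dscp_action(struct rte_flow_action *action,
                         struct rte_flow_action_set_dscp *conf,
                         bool ipv6, uint8_t dscp)
{
        conf->dscp = dscp;      /* e.g. 46 for Expedited Forwarding */
        action->type = ipv6 ? RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP :
                              RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
        action->conf = conf;
}
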
5233 /**
5234  * Match modify-header resource.
5235  *
5236  * @param list
5237  *   Pointer to the hash list.
5238  * @param entry
 5239  *   Pointer to the existing resource entry object.
 5240  * @param key
 5241  *   Key of the new entry.
 5242  * @param cb_ctx
 5243  *   Pointer to the context carrying the new modify-header resource.
5244  *
5245  * @return
5246  *   0 on matching, non-zero otherwise.
5247  */
5248 int
5249 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5250                         struct mlx5_hlist_entry *entry,
5251                         uint64_t key __rte_unused, void *cb_ctx)
5252 {
5253         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5254         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5255         struct mlx5_flow_dv_modify_hdr_resource *resource =
5256                         container_of(entry, typeof(*resource), entry);
5257         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5258
5259         key_len += ref->actions_num * sizeof(ref->actions[0]);
5260         return ref->actions_num != resource->actions_num ||
5261                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5262 }
5263
5264 struct mlx5_hlist_entry *
5265 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5266                          void *cb_ctx)
5267 {
5268         struct mlx5_dev_ctx_shared *sh = list->ctx;
5269         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5270         struct mlx5dv_dr_domain *ns;
5271         struct mlx5_flow_dv_modify_hdr_resource *entry;
5272         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5273         int ret;
5274         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5275         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5276
5277         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5278                             SOCKET_ID_ANY);
5279         if (!entry) {
5280                 rte_flow_error_set(ctx->error, ENOMEM,
5281                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5282                                    "cannot allocate resource memory");
5283                 return NULL;
5284         }
5285         rte_memcpy(&entry->ft_type,
5286                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5287                    key_len + data_len);
5288         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5289                 ns = sh->fdb_domain;
5290         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5291                 ns = sh->tx_domain;
5292         else
5293                 ns = sh->rx_domain;
5294         ret = mlx5_flow_os_create_flow_action_modify_header
5295                                         (sh->ctx, ns, entry,
5296                                          data_len, &entry->action);
5297         if (ret) {
5298                 mlx5_free(entry);
5299                 rte_flow_error_set(ctx->error, ENOMEM,
5300                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5301                                    NULL, "cannot create modification action");
5302                 return NULL;
5303         }
5304         return &entry->entry;
5305 }
5306
5307 /**
5308  * Validate the sample action.
5309  *
5310  * @param[in, out] action_flags
5311  *   Holds the actions detected until now.
5312  * @param[in] action
5313  *   Pointer to the sample action.
5314  * @param[in] dev
5315  *   Pointer to the Ethernet device structure.
5316  * @param[in] attr
5317  *   Attributes of flow that includes this action.
5318  * @param[in] item_flags
5319  *   Holds the items detected.
5320  * @param[in] rss
5321  *   Pointer to the RSS action.
5322  * @param[out] sample_rss
5323  *   Pointer to the RSS action in sample action list.
5324  * @param[out] count
5325  *   Pointer to the COUNT action in sample action list.
5326  * @param[out] fdb_mirror_limit
5327  *   Pointer to the FDB mirror limitation flag.
5328  * @param[out] error
5329  *   Pointer to error structure.
5330  *
5331  * @return
5332  *   0 on success, a negative errno value otherwise and rte_errno is set.
5333  */
5334 static int
5335 flow_dv_validate_action_sample(uint64_t *action_flags,
5336                                const struct rte_flow_action *action,
5337                                struct rte_eth_dev *dev,
5338                                const struct rte_flow_attr *attr,
5339                                uint64_t item_flags,
5340                                const struct rte_flow_action_rss *rss,
5341                                const struct rte_flow_action_rss **sample_rss,
5342                                const struct rte_flow_action_count **count,
5343                                int *fdb_mirror_limit,
5344                                struct rte_flow_error *error)
5345 {
5346         struct mlx5_priv *priv = dev->data->dev_private;
5347         struct mlx5_dev_config *dev_conf = &priv->config;
5348         const struct rte_flow_action_sample *sample = action->conf;
5349         const struct rte_flow_action *act;
5350         uint64_t sub_action_flags = 0;
5351         uint16_t queue_index = 0xFFFF;
5352         int actions_n = 0;
5353         int ret;
5354
5355         if (!sample)
5356                 return rte_flow_error_set(error, EINVAL,
5357                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5358                                           "configuration cannot be NULL");
5359         if (sample->ratio == 0)
5360                 return rte_flow_error_set(error, EINVAL,
5361                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5362                                           "sample ratio must be at least 1");
5363         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5364                 return rte_flow_error_set(error, ENOTSUP,
5365                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5366                                           NULL,
5367                                           "sample action not supported");
5368         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5369                 return rte_flow_error_set(error, EINVAL,
5370                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5371                                           "Multiple sample actions not "
5372                                           "supported");
5373         if (*action_flags & MLX5_FLOW_ACTION_METER)
5374                 return rte_flow_error_set(error, EINVAL,
5375                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5376                                           "wrong action order, meter should "
5377                                           "be after sample action");
5378         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5379                 return rte_flow_error_set(error, EINVAL,
5380                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5381                                           "wrong action order, jump should "
5382                                           "be after sample action");
5383         act = sample->actions;
5384         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5385                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5386                         return rte_flow_error_set(error, ENOTSUP,
5387                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5388                                                   act, "too many actions");
5389                 switch (act->type) {
5390                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5391                         ret = mlx5_flow_validate_action_queue(act,
5392                                                               sub_action_flags,
5393                                                               dev,
5394                                                               attr, error);
5395                         if (ret < 0)
5396                                 return ret;
5397                         queue_index = ((const struct rte_flow_action_queue *)
5398                                                         (act->conf))->index;
5399                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5400                         ++actions_n;
5401                         break;
5402                 case RTE_FLOW_ACTION_TYPE_RSS:
5403                         *sample_rss = act->conf;
5404                         ret = mlx5_flow_validate_action_rss(act,
5405                                                             sub_action_flags,
5406                                                             dev, attr,
5407                                                             item_flags,
5408                                                             error);
5409                         if (ret < 0)
5410                                 return ret;
5411                         if (rss && *sample_rss &&
5412                             ((*sample_rss)->level != rss->level ||
5413                             (*sample_rss)->types != rss->types))
5414                                 return rte_flow_error_set(error, ENOTSUP,
5415                                         RTE_FLOW_ERROR_TYPE_ACTION,
5416                                         NULL,
5417                                         "Can't use different RSS types "
5418                                         "or levels in the same flow");
5419                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5420                                 queue_index = (*sample_rss)->queue[0];
5421                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5422                         ++actions_n;
5423                         break;
5424                 case RTE_FLOW_ACTION_TYPE_MARK:
5425                         ret = flow_dv_validate_action_mark(dev, act,
5426                                                            sub_action_flags,
5427                                                            attr, error);
5428                         if (ret < 0)
5429                                 return ret;
5430                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5431                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5432                                                 MLX5_FLOW_ACTION_MARK_EXT;
5433                         else
5434                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5435                         ++actions_n;
5436                         break;
5437                 case RTE_FLOW_ACTION_TYPE_COUNT:
5438                         ret = flow_dv_validate_action_count
5439                                 (dev, is_shared_action_count(act),
5440                                  *action_flags | sub_action_flags,
5441                                  error);
5442                         if (ret < 0)
5443                                 return ret;
5444                         *count = act->conf;
5445                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5446                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5447                         ++actions_n;
5448                         break;
5449                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5450                         ret = flow_dv_validate_action_port_id(dev,
5451                                                               sub_action_flags,
5452                                                               act,
5453                                                               attr,
5454                                                               error);
5455                         if (ret)
5456                                 return ret;
5457                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5458                         ++actions_n;
5459                         break;
5460                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5461                         ret = flow_dv_validate_action_raw_encap_decap
5462                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5463                                  &actions_n, action, item_flags, error);
5464                         if (ret < 0)
5465                                 return ret;
5466                         ++actions_n;
5467                         break;
5468                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5469                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5470                         ret = flow_dv_validate_action_l2_encap(dev,
5471                                                                sub_action_flags,
5472                                                                act, attr,
5473                                                                error);
5474                         if (ret < 0)
5475                                 return ret;
5476                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5477                         ++actions_n;
5478                         break;
5479                 default:
5480                         return rte_flow_error_set(error, ENOTSUP,
5481                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5482                                                   NULL,
5483                                                   "unsupported action in the "
5484                                                   "sample sub-action list");
5485                 }
5486         }
5487         if (attr->ingress && !attr->transfer) {
5488                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5489                                           MLX5_FLOW_ACTION_RSS)))
5490                         return rte_flow_error_set(error, EINVAL,
5491                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5492                                                   NULL,
5493                                                   "Ingress must have a dest "
5494                                                   "QUEUE for Sample");
5495         } else if (attr->egress && !attr->transfer) {
5496                 return rte_flow_error_set(error, ENOTSUP,
5497                                           RTE_FLOW_ERROR_TYPE_ACTION,
5498                                           NULL,
5499                                           "Sample only supports Ingress "
5500                                           "or E-Switch");
5501         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5502                 MLX5_ASSERT(attr->transfer);
5503                 if (sample->ratio > 1)
5504                         return rte_flow_error_set(error, ENOTSUP,
5505                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5506                                                   NULL,
5507                                                   "E-Switch doesn't support "
5508                                                   "any optional action "
5509                                                   "for sampling");
5510                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5511                         return rte_flow_error_set(error, ENOTSUP,
5512                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5513                                                   NULL,
5514                                                   "unsupported action QUEUE");
5515                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5516                         return rte_flow_error_set(error, ENOTSUP,
5517                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5518                                                   NULL,
5519                                                   "unsupported action RSS");
5520                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5521                         return rte_flow_error_set(error, EINVAL,
5522                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5523                                                   NULL,
5524                                                   "E-Switch must have a dest "
5525                                                   "port for mirroring");
5526                 if (!priv->config.hca_attr.reg_c_preserve &&
5527                      priv->representor_id != UINT16_MAX)
5528                         *fdb_mirror_limit = 1;
5529         }
5530         /* Continue validation for Xcap actions. */
5531         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5532             (queue_index == 0xFFFF ||
5533              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5534                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5535                      MLX5_FLOW_XCAP_ACTIONS)
5536                         return rte_flow_error_set(error, ENOTSUP,
5537                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5538                                                   NULL, "encap and decap "
5539                                                   "combination isn't "
5540                                                   "supported");
5541                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5542                                                         MLX5_FLOW_ACTION_ENCAP))
5543                         return rte_flow_error_set(error, ENOTSUP,
5544                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5545                                                   NULL, "encap is not supported"
5546                                                   " for ingress traffic");
5547         }
5548         return 0;
5549 }
5550
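/*
 * Editor's sketch (hypothetical, not part of the driver): an ingress SAMPLE
 * action accepted by the validator above -- ratio >= 1 and a destination
 * QUEUE in the sub-action list. On E-Switch (transfer) rules the sub-action
 * list must use PORT_ID instead, and the ratio must be 1 (pure mirroring).
 */
static inline void
example_fill_sample_action(struct rte_flow_action_sample *sample,
                           struct rte_flow_action sub_actions[2],
                           struct rte_flow_action_queue *queue,
                           uint32_t ratio)
{
        queue->index = 0;       /* destination Rx queue */
        sub_actions[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                .conf = queue,
        };
        sub_actions[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_END,
        };
        sample->ratio = ratio;          /* sample 1 of every `ratio` packets */
        sample->actions = sub_actions;
}
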
5551 /**
5552  * Find existing modify-header resource or create and register a new one.
5553  *
 5554  * @param[in, out] dev
 5555  *   Pointer to rte_eth_dev structure.
 5556  * @param[in, out] resource
 5557  *   Pointer to modify-header resource.
 5558  * @param[in, out] dev_flow
 5559  *   Pointer to the dev_flow.
 5560  * @param[out] error
 5561  *   Pointer to error structure.
 5562  *
 5563  * @return
 5564  *   0 on success, otherwise a negative errno value and rte_errno is set.
5565  */
5566 static int
5567 flow_dv_modify_hdr_resource_register
5568                         (struct rte_eth_dev *dev,
5569                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5570                          struct mlx5_flow *dev_flow,
5571                          struct rte_flow_error *error)
5572 {
5573         struct mlx5_priv *priv = dev->data->dev_private;
5574         struct mlx5_dev_ctx_shared *sh = priv->sh;
5575         uint32_t key_len = sizeof(*resource) -
5576                            offsetof(typeof(*resource), ft_type) +
5577                            resource->actions_num * sizeof(resource->actions[0]);
5578         struct mlx5_hlist_entry *entry;
5579         struct mlx5_flow_cb_ctx ctx = {
5580                 .error = error,
5581                 .data = resource,
5582         };
5583         uint64_t key64;
5584
5585         resource->flags = dev_flow->dv.group ? 0 :
5586                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5587         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5588                                     resource->flags))
5589                 return rte_flow_error_set(error, EOVERFLOW,
5590                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5591                                           "too many modify header items");
5592         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5593         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5594         if (!entry)
5595                 return -rte_errno;
5596         resource = container_of(entry, typeof(*resource), entry);
5597         dev_flow->handle->dvh.modify_hdr = resource;
5598         return 0;
5599 }
5600
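/*
 * Editor's sketch of the cache-key layout used by the match/create/register
 * callbacks above: the key spans from ->ft_type to the end of the
 * variable-length ->actions[] array, and its checksum serves as the hash
 * list key. Hypothetical standalone helper for illustration.
 */
static inline uint64_t
example_modify_hdr_key(const struct mlx5_flow_dv_modify_hdr_resource *res)
{
        uint32_t len = sizeof(*res) - offsetof(typeof(*res), ft_type) +
                       res->actions_num * sizeof(res->actions[0]);

        /* A checksum is good enough here: collisions are resolved by the
         * full memcmp() in flow_dv_modify_match_cb(). */
        return __rte_raw_cksum(&res->ft_type, len, 0);
}
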
5601 /**
5602  * Get DV flow counter by index.
5603  *
5604  * @param[in] dev
5605  *   Pointer to the Ethernet device structure.
5606  * @param[in] idx
5607  *   mlx5 flow counter index in the container.
5608  * @param[out] ppool
5609  *   mlx5 flow counter pool in the container.
5610  *
5611  * @return
5612  *   Pointer to the counter, NULL otherwise.
5613  */
5614 static struct mlx5_flow_counter *
5615 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5616                            uint32_t idx,
5617                            struct mlx5_flow_counter_pool **ppool)
5618 {
5619         struct mlx5_priv *priv = dev->data->dev_private;
5620         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5621         struct mlx5_flow_counter_pool *pool;
5622
5623         /* Decrease to original index and clear shared bit. */
5624         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5625         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5626         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5627         MLX5_ASSERT(pool);
5628         if (ppool)
5629                 *ppool = pool;
5630         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5631 }
5632
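/*
 * Editor's sketch (hypothetical helper): how a counter index as used above
 * is decoded. Indices are biased by 1 so that 0 can mean "no counter", and
 * the MLX5_CNT_SHARED_OFFSET bit flags legacy shared counters.
 */
static inline void
example_decode_counter_idx(uint32_t idx, uint32_t *pool_idx, uint32_t *offset)
{
        idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1); /* drop bias and flag */
        *pool_idx = idx / MLX5_COUNTERS_PER_POOL;
        *offset = idx % MLX5_COUNTERS_PER_POOL;
}
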
5633 /**
5634  * Check the devx counter belongs to the pool.
5635  *
5636  * @param[in] pool
5637  *   Pointer to the counter pool.
5638  * @param[in] id
5639  *   The counter devx ID.
5640  *
5641  * @return
5642  *   True if counter belongs to the pool, false otherwise.
5643  */
5644 static bool
5645 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5646 {
5647         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5648                    MLX5_COUNTERS_PER_POOL;
5649
5650         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5651                 return true;
5652         return false;
5653 }
5654
5655 /**
5656  * Get a pool by devx counter ID.
5657  *
5658  * @param[in] cmng
5659  *   Pointer to the counter management.
5660  * @param[in] id
5661  *   The counter devx ID.
5662  *
5663  * @return
 5664  *   The counter pool pointer if it exists, NULL otherwise.
5665  */
5666 static struct mlx5_flow_counter_pool *
5667 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5668 {
5669         uint32_t i;
5670         struct mlx5_flow_counter_pool *pool = NULL;
5671
5672         rte_spinlock_lock(&cmng->pool_update_sl);
5673         /* Check last used pool. */
5674         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5675             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5676                 pool = cmng->pools[cmng->last_pool_idx];
5677                 goto out;
5678         }
5679         /* ID out of range means no suitable pool in the container. */
5680         if (id > cmng->max_id || id < cmng->min_id)
5681                 goto out;
5682         /*
5683          * Search the pools from the end of the container: counter IDs are
5684          * mostly allocated in increasing order, so the last pool is the
5685          * most likely match.
5686          */
5687         i = cmng->n_valid;
5688         while (i--) {
5689                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5690
5691                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5692                         pool = pool_tmp;
5693                         break;
5694                 }
5695         }
5696 out:
5697         rte_spinlock_unlock(&cmng->pool_update_sl);
5698         return pool;
5699 }
5700
5701 /**
5702  * Resize a counter container.
5703  *
5704  * @param[in] dev
5705  *   Pointer to the Ethernet device structure.
5706  *
5707  * @return
5708  *   0 on success, otherwise negative errno value and rte_errno is set.
5709  */
5710 static int
5711 flow_dv_container_resize(struct rte_eth_dev *dev)
5712 {
5713         struct mlx5_priv *priv = dev->data->dev_private;
5714         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5715         void *old_pools = cmng->pools;
5716         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5717         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5718         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5719
5720         if (!pools) {
5721                 rte_errno = ENOMEM;
5722                 return -ENOMEM;
5723         }
5724         if (old_pools)
5725                 memcpy(pools, old_pools, cmng->n *
5726                                        sizeof(struct mlx5_flow_counter_pool *));
5727         cmng->n = resize;
5728         cmng->pools = pools;
5729         if (old_pools)
5730                 mlx5_free(old_pools);
5731         return 0;
5732 }
5733
5734 /**
5735  * Query a devx flow counter.
5736  *
5737  * @param[in] dev
5738  *   Pointer to the Ethernet device structure.
5739  * @param[in] counter
5740  *   Index to the flow counter.
5741  * @param[out] pkts
5742  *   The statistics value of packets.
5743  * @param[out] bytes
5744  *   The statistics value of bytes.
5745  *
5746  * @return
5747  *   0 on success, otherwise a negative errno value and rte_errno is set.
5748  */
5749 static inline int
5750 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5751                      uint64_t *bytes)
5752 {
5753         struct mlx5_priv *priv = dev->data->dev_private;
5754         struct mlx5_flow_counter_pool *pool = NULL;
5755         struct mlx5_flow_counter *cnt;
5756         int offset;
5757
5758         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5759         MLX5_ASSERT(pool);
5760         if (priv->sh->cmng.counter_fallback)
5761                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5762                                         0, pkts, bytes, 0, NULL, NULL, 0);
5763         rte_spinlock_lock(&pool->sl);
5764         if (!pool->raw) {
5765                 *pkts = 0;
5766                 *bytes = 0;
5767         } else {
5768                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5769                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5770                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5771         }
5772         rte_spinlock_unlock(&pool->sl);
5773         return 0;
5774 }
5775
5776 /**
5777  * Create and initialize a new counter pool.
5778  *
5779  * @param[in] dev
5780  *   Pointer to the Ethernet device structure.
5781  * @param[out] dcs
5782  *   The devX counter handle.
 5783  * @param[in] age
 5784  *   Whether the pool is for counters allocated for aging.
 5787  *
5788  * @return
 5789  *   Pointer to the new counter pool on success, NULL otherwise and rte_errno is set.
5790  */
5791 static struct mlx5_flow_counter_pool *
5792 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5793                     uint32_t age)
5794 {
5795         struct mlx5_priv *priv = dev->data->dev_private;
5796         struct mlx5_flow_counter_pool *pool;
5797         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5798         bool fallback = priv->sh->cmng.counter_fallback;
5799         uint32_t size = sizeof(*pool);
5800
5801         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5802         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5803         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5804         if (!pool) {
5805                 rte_errno = ENOMEM;
5806                 return NULL;
5807         }
5808         pool->raw = NULL;
5809         pool->is_aged = !!age;
5810         pool->query_gen = 0;
5811         pool->min_dcs = dcs;
5812         rte_spinlock_init(&pool->sl);
5813         rte_spinlock_init(&pool->csl);
5814         TAILQ_INIT(&pool->counters[0]);
5815         TAILQ_INIT(&pool->counters[1]);
5816         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5817         rte_spinlock_lock(&cmng->pool_update_sl);
5818         pool->index = cmng->n_valid;
5819         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5820                 mlx5_free(pool);
5821                 rte_spinlock_unlock(&cmng->pool_update_sl);
5822                 return NULL;
5823         }
5824         cmng->pools[pool->index] = pool;
5825         cmng->n_valid++;
5826         if (unlikely(fallback)) {
5827                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5828
5829                 if (base < cmng->min_id)
5830                         cmng->min_id = base;
5831                 if (base > cmng->max_id)
5832                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5833                 cmng->last_pool_idx = pool->index;
5834         }
5835         rte_spinlock_unlock(&cmng->pool_update_sl);
5836         return pool;
5837 }
5838
5839 /**
5840  * Prepare a new counter and/or a new counter pool.
5841  *
5842  * @param[in] dev
5843  *   Pointer to the Ethernet device structure.
5844  * @param[out] cnt_free
5845  *   Where to put the pointer of a new counter.
5846  * @param[in] age
 5847  *   Whether the pool is for counters allocated for aging.
5848  *
5849  * @return
5850  *   The counter pool pointer and @p cnt_free is set on success,
5851  *   NULL otherwise and rte_errno is set.
5852  */
5853 static struct mlx5_flow_counter_pool *
5854 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5855                              struct mlx5_flow_counter **cnt_free,
5856                              uint32_t age)
5857 {
5858         struct mlx5_priv *priv = dev->data->dev_private;
5859         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5860         struct mlx5_flow_counter_pool *pool;
5861         struct mlx5_counters tmp_tq;
5862         struct mlx5_devx_obj *dcs = NULL;
5863         struct mlx5_flow_counter *cnt;
5864         enum mlx5_counter_type cnt_type =
5865                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5866         bool fallback = priv->sh->cmng.counter_fallback;
5867         uint32_t i;
5868
5869         if (fallback) {
5870                 /* bulk_bitmap must be 0 for single counter allocation. */
5871                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5872                 if (!dcs)
5873                         return NULL;
5874                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5875                 if (!pool) {
5876                         pool = flow_dv_pool_create(dev, dcs, age);
5877                         if (!pool) {
5878                                 mlx5_devx_cmd_destroy(dcs);
5879                                 return NULL;
5880                         }
5881                 }
5882                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5883                 cnt = MLX5_POOL_GET_CNT(pool, i);
5884                 cnt->pool = pool;
5885                 cnt->dcs_when_free = dcs;
5886                 *cnt_free = cnt;
5887                 return pool;
5888         }
5889         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5890         if (!dcs) {
5891                 rte_errno = ENODATA;
5892                 return NULL;
5893         }
5894         pool = flow_dv_pool_create(dev, dcs, age);
5895         if (!pool) {
5896                 mlx5_devx_cmd_destroy(dcs);
5897                 return NULL;
5898         }
5899         TAILQ_INIT(&tmp_tq);
5900         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5901                 cnt = MLX5_POOL_GET_CNT(pool, i);
5902                 cnt->pool = pool;
5903                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5904         }
5905         rte_spinlock_lock(&cmng->csl[cnt_type]);
5906         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5907         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5908         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5909         (*cnt_free)->pool = pool;
5910         return pool;
5911 }
5912
5913 /**
5914  * Allocate a flow counter.
5915  *
5916  * @param[in] dev
5917  *   Pointer to the Ethernet device structure.
5918  * @param[in] age
5919  *   Whether the counter was allocated for aging.
5920  *
5921  * @return
5922  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5923  */
5924 static uint32_t
5925 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5926 {
5927         struct mlx5_priv *priv = dev->data->dev_private;
5928         struct mlx5_flow_counter_pool *pool = NULL;
5929         struct mlx5_flow_counter *cnt_free = NULL;
5930         bool fallback = priv->sh->cmng.counter_fallback;
5931         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5932         enum mlx5_counter_type cnt_type =
5933                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5934         uint32_t cnt_idx;
5935
5936         if (!priv->config.devx) {
5937                 rte_errno = ENOTSUP;
5938                 return 0;
5939         }
5940         /* Get free counters from container. */
5941         rte_spinlock_lock(&cmng->csl[cnt_type]);
5942         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5943         if (cnt_free)
5944                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5945         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5946         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5947                 goto err;
5948         pool = cnt_free->pool;
5949         if (fallback)
5950                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5951         /* Create a DV counter action only on first-time usage. */
5952         if (!cnt_free->action) {
5953                 uint16_t offset;
5954                 struct mlx5_devx_obj *dcs;
5955                 int ret;
5956
5957                 if (!fallback) {
5958                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5959                         dcs = pool->min_dcs;
5960                 } else {
5961                         offset = 0;
5962                         dcs = cnt_free->dcs_when_free;
5963                 }
5964                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5965                                                             &cnt_free->action);
5966                 if (ret) {
5967                         rte_errno = errno;
5968                         goto err;
5969                 }
5970         }
5971         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5972                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5973         /* Update the counter reset values. */
5974         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5975                                  &cnt_free->bytes))
5976                 goto err;
5977         if (!fallback && !priv->sh->cmng.query_thread_on)
5978                 /* Start the asynchronous batch query by the host thread. */
5979                 mlx5_set_query_alarm(priv->sh);
5980         /*
5981          * When the count action isn't shared (by ID), the shared_info field
5982          * is used for the indirect action API's refcnt.
5983          * When the counter action is shared neither by ID nor by the
5984          * indirect action API, refcnt must be 1.
5985          */
5986         cnt_free->shared_info.refcnt = 1;
5987         return cnt_idx;
5988 err:
5989         if (cnt_free) {
5990                 cnt_free->pool = pool;
5991                 if (fallback)
5992                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5993                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5994                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5995                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5996         }
5997         return 0;
5998 }
5999
6000 /**
6001  * Allocate a shared flow counter.
6002  *
6003  * @param[in] ctx
6004  *   Pointer to the shared counter configuration.
6005  * @param[in] data
6006  *   Pointer to save the allocated counter index.
6007  *
6008  * @return
 6009  *   0 on success, otherwise a negative errno value and rte_errno is set.
 6010  */
6012 static int32_t
6013 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6014 {
6015         struct mlx5_shared_counter_conf *conf = ctx;
6016         struct rte_eth_dev *dev = conf->dev;
6017         struct mlx5_flow_counter *cnt;
6018
6019         data->dword = flow_dv_counter_alloc(dev, 0);
             if (!data->dword)
                     return -rte_errno;
6020         data->dword |= MLX5_CNT_SHARED_OFFSET;
6021         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6022         cnt->shared_info.id = conf->id;
6023         return 0;
6024 }
6025
6026 /**
6027  * Get a shared flow counter.
6028  *
6029  * @param[in] dev
6030  *   Pointer to the Ethernet device structure.
6031  * @param[in] id
6032  *   Counter identifier.
6033  *
6034  * @return
6035  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6036  */
6037 static uint32_t
6038 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6039 {
6040         struct mlx5_priv *priv = dev->data->dev_private;
6041         struct mlx5_shared_counter_conf conf = {
6042                 .dev = dev,
6043                 .id = id,
6044         };
6045         union mlx5_l3t_data data = {
6046                 .dword = 0,
6047         };
6048
6049         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6050                                flow_dv_counter_alloc_shared_cb, &conf);
6051         return data.dword;
6052 }
6053
6054 /**
6055  * Get age param from counter index.
6056  *
6057  * @param[in] dev
6058  *   Pointer to the Ethernet device structure.
6059  * @param[in] counter
6060  *   Index to the counter handler.
6061  *
6062  * @return
6063  *   The aging parameter specified for the counter index.
6064  */
6065 static struct mlx5_age_param*
6066 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6067                                 uint32_t counter)
6068 {
6069         struct mlx5_flow_counter *cnt;
6070         struct mlx5_flow_counter_pool *pool = NULL;
6071
6072         flow_dv_counter_get_by_idx(dev, counter, &pool);
6073         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6074         cnt = MLX5_POOL_GET_CNT(pool, counter);
6075         return MLX5_CNT_TO_AGE(cnt);
6076 }
6077
6078 /**
6079  * Remove a flow counter from aged counter list.
6080  *
6081  * @param[in] dev
6082  *   Pointer to the Ethernet device structure.
6083  * @param[in] counter
6084  *   Index to the counter handler.
6085  * @param[in] cnt
6086  *   Pointer to the counter handler.
6087  */
6088 static void
6089 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6090                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6091 {
6092         struct mlx5_age_info *age_info;
6093         struct mlx5_age_param *age_param;
6094         struct mlx5_priv *priv = dev->data->dev_private;
6095         uint16_t expected = AGE_CANDIDATE;
6096
6097         age_info = GET_PORT_AGE_INFO(priv);
6098         age_param = flow_dv_counter_idx_get_age(dev, counter);
6099         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6100                                          AGE_FREE, false, __ATOMIC_RELAXED,
6101                                          __ATOMIC_RELAXED)) {
6102                 /*
6103                  * We need the lock even if the age timeout has passed,
6104                  * since the counter may still be in process.
6105                  */
6106                 rte_spinlock_lock(&age_info->aged_sl);
6107                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6108                 rte_spinlock_unlock(&age_info->aged_sl);
6109                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6110         }
6111 }
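
/*
 * Sketch of the lock-free ownership hand-off used above: a single
 * compare-and-swap claims the counter only while it is still an aging
 * candidate; any other state means the aging thread already touched it,
 * so the locked slow path must run. The helper name is hypothetical.
 */
static __rte_unused bool
age_state_try_claim_sketch(uint16_t *state)
{
	uint16_t expected = AGE_CANDIDATE;

	return __atomic_compare_exchange_n(state, &expected, AGE_FREE, false,
					   __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}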
6112
6113 /**
6114  * Release a flow counter.
6115  *
6116  * @param[in] dev
6117  *   Pointer to the Ethernet device structure.
6118  * @param[in] counter
6119  *   Index to the counter handler.
6120  */
6121 static void
6122 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6123 {
6124         struct mlx5_priv *priv = dev->data->dev_private;
6125         struct mlx5_flow_counter_pool *pool = NULL;
6126         struct mlx5_flow_counter *cnt;
6127         enum mlx5_counter_type cnt_type;
6128
6129         if (!counter)
6130                 return;
6131         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6132         MLX5_ASSERT(pool);
6133         if (pool->is_aged) {
6134                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6135         } else {
6136                 /*
6137                  * If the counter action is shared by ID, l3t_clear_entry
6138                  * decrements its reference counter. If the action is
6139                  * still referenced after the decrement, the function
6140                  * returns here and does not release it.
6141                  */
6142                 if (IS_LEGACY_SHARED_CNT(counter) &&
6143                     mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
6144                                          cnt->shared_info.id))
6145                         return;
6146                 /*
6147                  * If the counter action is shared by the indirect action
6148                  * API, the atomic operation decrements its reference
6149                  * counter; if it is still referenced after the decrement,
6150                  * return here without releasing it. When the counter is
6151                  * shared neither by ID nor by the indirect action API,
6152                  * the reference count is 1 before the decrement, so the
6153                  * condition fails and the counter is released below.
6154                  */
6155                 if (!IS_LEGACY_SHARED_CNT(counter) &&
6156                     __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6157                                        __ATOMIC_RELAXED))
6158                         return;
6159         }
6160         cnt->pool = pool;
6161         /*
6162          * Put the counter back to the list to be updated in non-fallback
6163          * mode. Two lists are used alternately: while one is being queried,
6164          * freed counters are added to the other, selected by the pool
6165          * query_gen value. After a query finishes, that list is appended to
6166          * the global container counter list. The lists swap when a query
6167          * starts, so no lock is needed here: the query callback and this
6168          * release function always operate on different lists.
6169          */
6170         if (!priv->sh->cmng.counter_fallback) {
6171                 rte_spinlock_lock(&pool->csl);
6172                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6173                 rte_spinlock_unlock(&pool->csl);
6174         } else {
6175                 cnt->dcs_when_free = cnt->dcs_when_active;
6176                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6177                                            MLX5_COUNTER_TYPE_ORIGIN;
6178                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6179                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6180                                   cnt, next);
6181                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6182         }
6183 }
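
/*
 * Illustrative counter lifecycle built from the helpers above; the wrapper
 * name is hypothetical. A zero index consistently means failure with
 * rte_errno set, so no separate status code is needed.
 */
static __rte_unused void
counter_lifecycle_sketch(struct rte_eth_dev *dev)
{
	uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);

	if (!cnt_idx)
		return;
	/* ... attach the counter to a flow and query it here ... */
	flow_dv_counter_free(dev, cnt_idx);
}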
6184
6185 /**
6186  * Resize a meter id container.
6187  *
6188  * @param[in] dev
6189  *   Pointer to the Ethernet device structure.
6190  *
6191  * @return
6192  *   0 on success, otherwise negative errno value and rte_errno is set.
6193  */
6194 static int
6195 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6196 {
6197         struct mlx5_priv *priv = dev->data->dev_private;
6198         struct mlx5_aso_mtr_pools_mng *pools_mng =
6199                                 &priv->sh->mtrmng->pools_mng;
6200         void *old_pools = pools_mng->pools;
6201         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6202         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6203         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6204
6205         if (!pools) {
6206                 rte_errno = ENOMEM;
6207                 return -ENOMEM;
6208         }
6209         if (!pools_mng->n &&
6210             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6211                 mlx5_free(pools);
6212                 return -ENOMEM;
6213         }
6214         if (old_pools)
6215                 memcpy(pools, old_pools, pools_mng->n *
6216                                        sizeof(struct mlx5_aso_mtr_pool *));
6217         pools_mng->n = resize;
6218         pools_mng->pools = pools;
6219         if (old_pools)
6220                 mlx5_free(old_pools);
6221         return 0;
6222 }
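
/*
 * Generic sketch of the chunked-resize pattern implemented above: the
 * pointer array grows by a fixed step, old entries are copied over, and
 * the old array is freed only after the copy. Names are hypothetical.
 */
static __rte_unused void **
pointer_array_grow_sketch(void **old, uint32_t n_old, uint32_t step)
{
	void **arr = mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(void *) * (n_old + step), 0,
				 SOCKET_ID_ANY);

	if (!arr)
		return NULL;
	if (old) {
		memcpy(arr, old, sizeof(void *) * n_old);
		mlx5_free(old);
	}
	return arr;
}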
6223
6224 /**
6225  * Prepare a new meter and/or a new meter pool.
6226  *
6227  * @param[in] dev
6228  *   Pointer to the Ethernet device structure.
6229  * @param[out] mtr_free
6230  *   Where to put the pointer to a newly allocated meter.
6231  *
6232  * @return
6233  *   The meter pool pointer on success and @p mtr_free is set,
6234  *   NULL otherwise and rte_errno is set.
6235  */
6236 static struct mlx5_aso_mtr_pool *
6237 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6238                              struct mlx5_aso_mtr **mtr_free)
6239 {
6240         struct mlx5_priv *priv = dev->data->dev_private;
6241         struct mlx5_aso_mtr_pools_mng *pools_mng =
6242                                 &priv->sh->mtrmng->pools_mng;
6243         struct mlx5_aso_mtr_pool *pool = NULL;
6244         struct mlx5_devx_obj *dcs = NULL;
6245         uint32_t i;
6246         uint32_t log_obj_size;
6247
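             /* Each ASO meter object manages two meters, hence the halving. */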
6248         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6249         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6250                         priv->sh->pdn, log_obj_size);
6251         if (!dcs) {
6252                 rte_errno = ENODATA;
6253                 return NULL;
6254         }
6255         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6256         if (!pool) {
6257                 rte_errno = ENOMEM;
6258                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6259                 return NULL;
6260         }
6261         pool->devx_obj = dcs;
6262         pool->index = pools_mng->n_valid;
6263         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6264                 mlx5_free(pool);
6265                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6266                 return NULL;
6267         }
6268         pools_mng->pools[pool->index] = pool;
6269         pools_mng->n_valid++;
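             /* Seed the free list; mtrs[0] is handed straight to the caller. */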
6270         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6271                 pool->mtrs[i].offset = i;
6272                 LIST_INSERT_HEAD(&pools_mng->meters,
6273                                  &pool->mtrs[i], next);
6274         }
6275         pool->mtrs[0].offset = 0;
6276         *mtr_free = &pool->mtrs[0];
6277         return pool;
6278 }
6279
6280 /**
6281  * Release a flow meter into pool.
6282  *
6283  * @param[in] dev
6284  *   Pointer to the Ethernet device structure.
6285  * @param[in] mtr_idx
6286  *   Index of the ASO flow meter to release.
6287  */
6288 static void
6289 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6290 {
6291         struct mlx5_priv *priv = dev->data->dev_private;
6292         struct mlx5_aso_mtr_pools_mng *pools_mng =
6293                                 &priv->sh->mtrmng->pools_mng;
6294         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6295
6296         MLX5_ASSERT(aso_mtr);
6297         rte_spinlock_lock(&pools_mng->mtrsl);
6298         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6299         aso_mtr->state = ASO_METER_FREE;
6300         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6301         rte_spinlock_unlock(&pools_mng->mtrsl);
6302 }
6303
6304 /**
6305  * Allocate an ASO flow meter.
6306  *
6307  * @param[in] dev
6308  *   Pointer to the Ethernet device structure.
6309  *
6310  * @return
6311  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6312  */
6313 static uint32_t
6314 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6315 {
6316         struct mlx5_priv *priv = dev->data->dev_private;
6317         struct mlx5_aso_mtr *mtr_free = NULL;
6318         struct mlx5_aso_mtr_pools_mng *pools_mng =
6319                                 &priv->sh->mtrmng->pools_mng;
6320         struct mlx5_aso_mtr_pool *pool;
6321         uint32_t mtr_idx = 0;
6322
6323         if (!priv->config.devx) {
6324                 rte_errno = ENOTSUP;
6325                 return 0;
6326         }
6327         /* Get a free meter from the management free list. */
6329         rte_spinlock_lock(&pools_mng->mtrsl);
6330         mtr_free = LIST_FIRST(&pools_mng->meters);
6331         if (mtr_free)
6332                 LIST_REMOVE(mtr_free, next);
6333         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6334                 rte_spinlock_unlock(&pools_mng->mtrsl);
6335                 return 0;
6336         }
6337         mtr_free->state = ASO_METER_WAIT;
6338         rte_spinlock_unlock(&pools_mng->mtrsl);
6339         pool = container_of(mtr_free,
6340                         struct mlx5_aso_mtr_pool,
6341                         mtrs[mtr_free->offset]);
6342         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6343         if (!mtr_free->fm.meter_action) {
6344 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6345                 struct rte_flow_error error;
6346                 uint8_t reg_id;
6347
6348                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6349                 mtr_free->fm.meter_action =
6350                         mlx5_glue->dv_create_flow_action_aso
6351                                                 (priv->sh->rx_domain,
6352                                                  pool->devx_obj->obj,
6353                                                  mtr_free->offset,
6354                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6355                                                  reg_id - REG_C_0);
6356 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6357                 if (!mtr_free->fm.meter_action) {
6358                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6359                         return 0;
6360                 }
6361         }
6362         return mtr_idx;
6363 }
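
/*
 * Illustrative only: a common way to combine a pool index and an in-pool
 * offset into the single 1-based handle that MLX5_MAKE_MTR_IDX produces
 * above (see the meter headers for the authoritative definition); 0 is
 * reserved to mean "no meter". The helper name is hypothetical.
 */
static __rte_unused uint32_t
mtr_idx_pack_sketch(uint32_t pool_idx, uint32_t offset)
{
	return pool_idx * MLX5_ASO_MTRS_PER_POOL + offset + 1;
}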
6364
6365 /**
6366  * Verify the @p attributes will be correctly understood by the NIC and store
6367  * them in the @p flow if everything is correct.
6368  *
6369  * @param[in] dev
6370  *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel offload descriptor, or NULL.
6371  * @param[in] attributes
6372  *   Pointer to flow attributes.
 * @param[in] grp_info
 *   Pointer to the flow group translation info.
6375  * @param[out] error
6376  *   Pointer to error structure.
6377  *
6378  * @return
6379  *   - 0 on success and non-root table.
6380  *   - 1 on success and root table.
6381  *   - a negative errno value otherwise and rte_errno is set.
6382  */
6383 static int
6384 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6385                             const struct mlx5_flow_tunnel *tunnel,
6386                             const struct rte_flow_attr *attributes,
6387                             const struct flow_grp_info *grp_info,
6388                             struct rte_flow_error *error)
6389 {
6390         struct mlx5_priv *priv = dev->data->dev_private;
6391         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6392         int ret = 0;
6393
6394 #ifndef HAVE_MLX5DV_DR
6395         RTE_SET_USED(tunnel);
6396         RTE_SET_USED(grp_info);
6397         if (attributes->group)
6398                 return rte_flow_error_set(error, ENOTSUP,
6399                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6400                                           NULL,
6401                                           "groups are not supported");
6402 #else
6403         uint32_t table = 0;
6404
6405         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6406                                        grp_info, error);
6407         if (ret)
6408                 return ret;
6409         if (!table)
6410                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6411 #endif
6412         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6413             attributes->priority > lowest_priority)
6414                 return rte_flow_error_set(error, ENOTSUP,
6415                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6416                                           NULL,
6417                                           "priority out of range");
6418         if (attributes->transfer) {
6419                 if (!priv->config.dv_esw_en)
6420                         return rte_flow_error_set
6421                                 (error, ENOTSUP,
6422                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6423                                  "E-Switch dr is not supported");
6424                 if (!(priv->representor || priv->master))
6425                         return rte_flow_error_set
6426                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6427                                  NULL, "E-Switch configuration can only be"
6428                                  " done by a master or a representor device");
6429                 if (attributes->egress)
6430                         return rte_flow_error_set
6431                                 (error, ENOTSUP,
6432                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6433                                  "egress is not supported");
6434         }
6435         if (!(attributes->egress ^ attributes->ingress))
6436                 return rte_flow_error_set(error, ENOTSUP,
6437                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6438                                           "must specify exactly one of "
6439                                           "ingress or egress");
6440         return ret;
6441 }
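
/*
 * Sketch of the direction check enforced above: exactly one of the two
 * one-bit attributes must be set, which is precisely their XOR. The helper
 * name is hypothetical.
 */
static __rte_unused bool
attr_direction_valid_sketch(const struct rte_flow_attr *attr)
{
	return !!attr->ingress ^ !!attr->egress;
}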
6442
6443 static uint16_t
6444 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6445                           const struct rte_flow_item *end)
6446 {
6447         const struct rte_flow_item *item = *head;
6448         uint16_t l3_protocol;
6449
6450         for (; item != end; item++) {
6451                 switch (item->type) {
6452                 default:
6453                         break;
6454                 case RTE_FLOW_ITEM_TYPE_IPV4:
6455                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6456                         goto l3_ok;
6457                 case RTE_FLOW_ITEM_TYPE_IPV6:
6458                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6459                         goto l3_ok;
6460                 case RTE_FLOW_ITEM_TYPE_ETH:
6461                         if (item->mask && item->spec) {
6462                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6463                                                             type, item,
6464                                                             l3_protocol);
6465                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6466                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6467                                         goto l3_ok;
6468                         }
6469                         break;
6470                 case RTE_FLOW_ITEM_TYPE_VLAN:
6471                         if (item->mask && item->spec) {
6472                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6473                                                             inner_type, item,
6474                                                             l3_protocol);
6475                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6476                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6477                                         goto l3_ok;
6478                         }
6479                         break;
6480                 }
6481         }
6482         return 0;
6483 l3_ok:
6484         *head = item;
6485         return l3_protocol;
6486 }
6487
6488 static uint8_t
6489 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6490                           const struct rte_flow_item *end)
6491 {
6492         const struct rte_flow_item *item = *head;
6493         uint8_t l4_protocol;
6494
6495         for (; item != end; item++) {
6496                 switch (item->type) {
6497                 default:
6498                         break;
6499                 case RTE_FLOW_ITEM_TYPE_TCP:
6500                         l4_protocol = IPPROTO_TCP;
6501                         goto l4_ok;
6502                 case RTE_FLOW_ITEM_TYPE_UDP:
6503                         l4_protocol = IPPROTO_UDP;
6504                         goto l4_ok;
6505                 case RTE_FLOW_ITEM_TYPE_IPV4:
6506                         if (item->mask && item->spec) {
6507                                 const struct rte_flow_item_ipv4 *mask, *spec;
6508
6509                                 mask = (typeof(mask))item->mask;
6510                                 spec = (typeof(spec))item->spec;
6511                                 l4_protocol = mask->hdr.next_proto_id &
6512                                               spec->hdr.next_proto_id;
6513                                 if (l4_protocol == IPPROTO_TCP ||
6514                                     l4_protocol == IPPROTO_UDP)
6515                                         goto l4_ok;
6516                         }
6517                         break;
6518                 case RTE_FLOW_ITEM_TYPE_IPV6:
6519                         if (item->mask && item->spec) {
6520                                 const struct rte_flow_item_ipv6 *mask, *spec;
6521                                 mask = (typeof(mask))item->mask;
6522                                 spec = (typeof(spec))item->spec;
6523                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6524                                 if (l4_protocol == IPPROTO_TCP ||
6525                                     l4_protocol == IPPROTO_UDP)
6526                                         goto l4_ok;
6527                         }
6528                         break;
6529                 }
6530         }
6531         return 0;
6532 l4_ok:
6533         *head = item;
6534         return l4_protocol;
6535 }
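
/*
 * Sketch of the mask & spec convention used by the two locators above:
 * only bits covered by the mask take part in matching, so the effective
 * protocol is the bitwise AND of spec and mask. The helper name is
 * hypothetical.
 */
static __rte_unused uint8_t
effective_ipv4_next_proto_sketch(const struct rte_flow_item *item)
{
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 *spec = item->spec;

	if (!mask || !spec)
		return 0;
	return mask->hdr.next_proto_id & spec->hdr.next_proto_id;
}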
6536
6537 static int
6538 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6539                                 const struct rte_flow_item *rule_items,
6540                                 const struct rte_flow_item *integrity_item,
6541                                 struct rte_flow_error *error)
6542 {
6543         struct mlx5_priv *priv = dev->data->dev_private;
6544         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6545         const struct rte_flow_item_integrity *mask = (typeof(mask))
6546                                                      integrity_item->mask;
6547         const struct rte_flow_item_integrity *spec = (typeof(spec))
6548                                                      integrity_item->spec;
6549         uint32_t protocol;
6550
6551         if (!priv->config.hca_attr.pkt_integrity_match)
6552                 return rte_flow_error_set(error, ENOTSUP,
6553                                           RTE_FLOW_ERROR_TYPE_ITEM,
6554                                           integrity_item,
6555                                           "packet integrity match is not supported");
6556         if (!mask)
6557                 mask = &rte_flow_item_integrity_mask;
6558         if (!mlx5_validate_integrity_item(mask))
6559                 return rte_flow_error_set(error, ENOTSUP,
6560                                           RTE_FLOW_ERROR_TYPE_ITEM,
6561                                           integrity_item,
6562                                           "unsupported integrity filter");
6563         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
             /* The spec is mandatory here: the level selects inner/outer. */
             if (!spec)
                     return rte_flow_error_set(error, EINVAL,
                                               RTE_FLOW_ERROR_TYPE_ITEM,
                                               integrity_item,
                                               "no integrity item spec");
6564         if (spec->level > 1) {
6565                 if (!tunnel_item)
6566                         return rte_flow_error_set(error, ENOTSUP,
6567                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6568                                                   integrity_item,
6569                                                   "missing tunnel item");
6570                 item = tunnel_item;
6571                 end_item = mlx5_find_end_item(tunnel_item);
6572         } else {
6573                 end_item = tunnel_item ? tunnel_item :
6574                            mlx5_find_end_item(integrity_item);
6575         }
6576         if (mask->l3_ok || mask->ipv4_csum_ok) {
6577                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6578                 if (!protocol)
6579                         return rte_flow_error_set(error, EINVAL,
6580                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6581                                                   integrity_item,
6582                                                   "missing L3 protocol");
6583         }
6584         if (mask->l4_ok || mask->l4_csum_ok) {
6585                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6586                 if (!protocol)
6587                         return rte_flow_error_set(error, EINVAL,
6588                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6589                                                   integrity_item,
6590                                                   "missing L4 protocol");
6591         }
6592         return 0;
6593 }
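
/*
 * A pattern that should satisfy the validation above, assuming the device
 * reports pkt_integrity_match: an outer (level <= 1) integrity item with
 * l3_ok/l4_ok requires L3 and L4 items in the same rule. Values are
 * illustrative and the function name is hypothetical.
 */
static __rte_unused void
integrity_pattern_sketch(void)
{
	static const struct rte_flow_item_integrity spec = {
		.level = 0,
		.l3_ok = 1,
		.l4_ok = 1,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_INTEGRITY, .spec = &spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	RTE_SET_USED(pattern);
}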
6594
6595 /**
6596  * Internal validation function. For validating both actions and items.
6597  *
6598  * @param[in] dev
6599  *   Pointer to the rte_eth_dev structure.
6600  * @param[in] attr
6601  *   Pointer to the flow attributes.
6602  * @param[in] items
6603  *   Pointer to the list of items.
6604  * @param[in] actions
6605  *   Pointer to the list of actions.
6606  * @param[in] external
6607  *   This flow rule is created by a request external to the PMD.
6608  * @param[in] hairpin
6609  *   Number of hairpin TX actions, 0 means classic flow.
6610  * @param[out] error
6611  *   Pointer to the error structure.
6612  *
6613  * @return
6614  *   0 on success, a negative errno value otherwise and rte_errno is set.
6615  */
6616 static int
6617 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6618                  const struct rte_flow_item items[],
6619                  const struct rte_flow_action actions[],
6620                  bool external, int hairpin, struct rte_flow_error *error)
6621 {
6622         int ret;
6623         uint64_t action_flags = 0;
6624         uint64_t item_flags = 0;
6625         uint64_t last_item = 0;
6626         uint8_t next_protocol = 0xff;
6627         uint16_t ether_type = 0;
6628         int actions_n = 0;
6629         uint8_t item_ipv6_proto = 0;
6630         int fdb_mirror_limit = 0;
6631         int modify_after_mirror = 0;
6632         const struct rte_flow_item *geneve_item = NULL;
6633         const struct rte_flow_item *gre_item = NULL;
6634         const struct rte_flow_item *gtp_item = NULL;
6635         const struct rte_flow_action_raw_decap *decap;
6636         const struct rte_flow_action_raw_encap *encap;
6637         const struct rte_flow_action_rss *rss = NULL;
6638         const struct rte_flow_action_rss *sample_rss = NULL;
6639         const struct rte_flow_action_count *sample_count = NULL;
6640         const struct rte_flow_item_tcp nic_tcp_mask = {
6641                 .hdr = {
6642                         .tcp_flags = 0xFF,
6643                         .src_port = RTE_BE16(UINT16_MAX),
6644                         .dst_port = RTE_BE16(UINT16_MAX),
6645                 }
6646         };
6647         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6648                 .hdr = {
6649                         .src_addr =
6650                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6651                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6652                         .dst_addr =
6653                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6654                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6655                         .vtc_flow = RTE_BE32(0xffffffff),
6656                         .proto = 0xff,
6657                         .hop_limits = 0xff,
6658                 },
6659                 .has_frag_ext = 1,
6660         };
6661         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6662                 .hdr = {
6663                         .common = {
6664                                 .u32 =
6665                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6666                                         .type = 0xFF,
6667                                         }).u32),
6668                         },
6669                         .dummy[0] = 0xffffffff,
6670                 },
6671         };
6672         struct mlx5_priv *priv = dev->data->dev_private;
6673         struct mlx5_dev_config *dev_conf = &priv->config;
6674         uint16_t queue_index = 0xFFFF;
6675         const struct rte_flow_item_vlan *vlan_m = NULL;
6676         uint32_t rw_act_num = 0;
6677         uint64_t is_root;
6678         const struct mlx5_flow_tunnel *tunnel;
6679         enum mlx5_tof_rule_type tof_rule_type;
6680         struct flow_grp_info grp_info = {
6681                 .external = !!external,
6682                 .transfer = !!attr->transfer,
6683                 .fdb_def_rule = !!priv->fdb_def_rule,
6684                 .std_tbl_fix = true,
6685         };
6686         const struct rte_eth_hairpin_conf *conf;
6687         const struct rte_flow_item *rule_items = items;
6688         bool def_policy = false;
6689
6690         if (items == NULL)
6691                 return -1;
6692         tunnel = is_tunnel_offload_active(dev) ?
6693                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6694         if (tunnel) {
6695                 if (priv->representor)
6696                         return rte_flow_error_set
6697                                 (error, ENOTSUP,
6698                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6699                                  NULL, "decap not supported for VF representor");
6700                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6701                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6702                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6703                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6704                                         MLX5_FLOW_ACTION_DECAP;
6705                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6706                                         (dev, attr, tunnel, tof_rule_type);
6707         }
6708         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6709         if (ret < 0)
6710                 return ret;
6711         is_root = (uint64_t)ret;
6712         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6713                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6714                 int type = items->type;
6715
6716                 if (!mlx5_flow_os_item_supported(type))
6717                         return rte_flow_error_set(error, ENOTSUP,
6718                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6719                                                   NULL, "item not supported");
6720                 switch (type) {
6721                 case RTE_FLOW_ITEM_TYPE_VOID:
6722                         break;
6723                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6724                         ret = flow_dv_validate_item_port_id
6725                                         (dev, items, attr, item_flags, error);
6726                         if (ret < 0)
6727                                 return ret;
6728                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6729                         break;
6730                 case RTE_FLOW_ITEM_TYPE_ETH:
6731                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6732                                                           true, error);
6733                         if (ret < 0)
6734                                 return ret;
6735                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6736                                              MLX5_FLOW_LAYER_OUTER_L2;
6737                         if (items->mask != NULL && items->spec != NULL) {
6738                                 ether_type =
6739                                         ((const struct rte_flow_item_eth *)
6740                                          items->spec)->type;
6741                                 ether_type &=
6742                                         ((const struct rte_flow_item_eth *)
6743                                          items->mask)->type;
6744                                 ether_type = rte_be_to_cpu_16(ether_type);
6745                         } else {
6746                                 ether_type = 0;
6747                         }
6748                         break;
6749                 case RTE_FLOW_ITEM_TYPE_VLAN:
6750                         ret = flow_dv_validate_item_vlan(items, item_flags,
6751                                                          dev, error);
6752                         if (ret < 0)
6753                                 return ret;
6754                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6755                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6756                         if (items->mask != NULL && items->spec != NULL) {
6757                                 ether_type =
6758                                         ((const struct rte_flow_item_vlan *)
6759                                          items->spec)->inner_type;
6760                                 ether_type &=
6761                                         ((const struct rte_flow_item_vlan *)
6762                                          items->mask)->inner_type;
6763                                 ether_type = rte_be_to_cpu_16(ether_type);
6764                         } else {
6765                                 ether_type = 0;
6766                         }
6767                         /* Store outer VLAN mask for of_push_vlan action. */
6768                         if (!tunnel)
6769                                 vlan_m = items->mask;
6770                         break;
6771                 case RTE_FLOW_ITEM_TYPE_IPV4:
6772                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6773                                                   &item_flags, &tunnel);
6774                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6775                                                          last_item, ether_type,
6776                                                          error);
6777                         if (ret < 0)
6778                                 return ret;
6779                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6780                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6781                         if (items->mask != NULL &&
6782                             ((const struct rte_flow_item_ipv4 *)
6783                              items->mask)->hdr.next_proto_id) {
6784                                 next_protocol =
6785                                         ((const struct rte_flow_item_ipv4 *)
6786                                          (items->spec))->hdr.next_proto_id;
6787                                 next_protocol &=
6788                                         ((const struct rte_flow_item_ipv4 *)
6789                                          (items->mask))->hdr.next_proto_id;
6790                         } else {
6791                                 /* Reset for inner layer. */
6792                                 next_protocol = 0xff;
6793                         }
6794                         break;
6795                 case RTE_FLOW_ITEM_TYPE_IPV6:
6796                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6797                                                   &item_flags, &tunnel);
6798                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6799                                                            last_item,
6800                                                            ether_type,
6801                                                            &nic_ipv6_mask,
6802                                                            error);
6803                         if (ret < 0)
6804                                 return ret;
6805                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6806                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6807                         if (items->mask != NULL &&
6808                             ((const struct rte_flow_item_ipv6 *)
6809                              items->mask)->hdr.proto) {
6810                                 item_ipv6_proto =
6811                                         ((const struct rte_flow_item_ipv6 *)
6812                                          items->spec)->hdr.proto;
6813                                 next_protocol =
6814                                         ((const struct rte_flow_item_ipv6 *)
6815                                          items->spec)->hdr.proto;
6816                                 next_protocol &=
6817                                         ((const struct rte_flow_item_ipv6 *)
6818                                          items->mask)->hdr.proto;
6819                         } else {
6820                                 /* Reset for inner layer. */
6821                                 next_protocol = 0xff;
6822                         }
6823                         break;
6824                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6825                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6826                                                                   item_flags,
6827                                                                   error);
6828                         if (ret < 0)
6829                                 return ret;
6830                         last_item = tunnel ?
6831                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6832                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6833                         if (items->mask != NULL &&
6834                             ((const struct rte_flow_item_ipv6_frag_ext *)
6835                              items->mask)->hdr.next_header) {
6836                                 next_protocol =
6837                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6838                                  items->spec)->hdr.next_header;
6839                                 next_protocol &=
6840                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6841                                  items->mask)->hdr.next_header;
6842                         } else {
6843                                 /* Reset for inner layer. */
6844                                 next_protocol = 0xff;
6845                         }
6846                         break;
6847                 case RTE_FLOW_ITEM_TYPE_TCP:
6848                         ret = mlx5_flow_validate_item_tcp
6849                                                 (items, item_flags,
6850                                                  next_protocol,
6851                                                  &nic_tcp_mask,
6852                                                  error);
6853                         if (ret < 0)
6854                                 return ret;
6855                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6856                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6857                         break;
6858                 case RTE_FLOW_ITEM_TYPE_UDP:
6859                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6860                                                           next_protocol,
6861                                                           error);
6862                         if (ret < 0)
6863                                 return ret;
6864                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6865                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6866                         break;
6867                 case RTE_FLOW_ITEM_TYPE_GRE:
6868                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6869                                                           next_protocol, error);
6870                         if (ret < 0)
6871                                 return ret;
6872                         gre_item = items;
6873                         last_item = MLX5_FLOW_LAYER_GRE;
6874                         break;
6875                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6876                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6877                                                             next_protocol,
6878                                                             error);
6879                         if (ret < 0)
6880                                 return ret;
6881                         last_item = MLX5_FLOW_LAYER_NVGRE;
6882                         break;
6883                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6884                         ret = mlx5_flow_validate_item_gre_key
6885                                 (items, item_flags, gre_item, error);
6886                         if (ret < 0)
6887                                 return ret;
6888                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6889                         break;
6890                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6891                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6892                                                             error);
6893                         if (ret < 0)
6894                                 return ret;
6895                         last_item = MLX5_FLOW_LAYER_VXLAN;
6896                         break;
6897                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6898                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6899                                                                 item_flags, dev,
6900                                                                 error);
6901                         if (ret < 0)
6902                                 return ret;
6903                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6904                         break;
6905                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6906                         ret = mlx5_flow_validate_item_geneve(items,
6907                                                              item_flags, dev,
6908                                                              error);
6909                         if (ret < 0)
6910                                 return ret;
6911                         geneve_item = items;
6912                         last_item = MLX5_FLOW_LAYER_GENEVE;
6913                         break;
6914                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6915                         ret = mlx5_flow_validate_item_geneve_opt(items,
6916                                                                  last_item,
6917                                                                  geneve_item,
6918                                                                  dev,
6919                                                                  error);
6920                         if (ret < 0)
6921                                 return ret;
6922                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6923                         break;
6924                 case RTE_FLOW_ITEM_TYPE_MPLS:
6925                         ret = mlx5_flow_validate_item_mpls(dev, items,
6926                                                            item_flags,
6927                                                            last_item, error);
6928                         if (ret < 0)
6929                                 return ret;
6930                         last_item = MLX5_FLOW_LAYER_MPLS;
6931                         break;
6933                 case RTE_FLOW_ITEM_TYPE_MARK:
6934                         ret = flow_dv_validate_item_mark(dev, items, attr,
6935                                                          error);
6936                         if (ret < 0)
6937                                 return ret;
6938                         last_item = MLX5_FLOW_ITEM_MARK;
6939                         break;
6940                 case RTE_FLOW_ITEM_TYPE_META:
6941                         ret = flow_dv_validate_item_meta(dev, items, attr,
6942                                                          error);
6943                         if (ret < 0)
6944                                 return ret;
6945                         last_item = MLX5_FLOW_ITEM_METADATA;
6946                         break;
6947                 case RTE_FLOW_ITEM_TYPE_ICMP:
6948                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6949                                                            next_protocol,
6950                                                            error);
6951                         if (ret < 0)
6952                                 return ret;
6953                         last_item = MLX5_FLOW_LAYER_ICMP;
6954                         break;
6955                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6956                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6957                                                             next_protocol,
6958                                                             error);
6959                         if (ret < 0)
6960                                 return ret;
6961                         item_ipv6_proto = IPPROTO_ICMPV6;
6962                         last_item = MLX5_FLOW_LAYER_ICMP6;
6963                         break;
6964                 case RTE_FLOW_ITEM_TYPE_TAG:
6965                         ret = flow_dv_validate_item_tag(dev, items,
6966                                                         attr, error);
6967                         if (ret < 0)
6968                                 return ret;
6969                         last_item = MLX5_FLOW_ITEM_TAG;
6970                         break;
6971                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6972                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6973                         break;
6974                 case RTE_FLOW_ITEM_TYPE_GTP:
6975                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
6976                                                         error);
6977                         if (ret < 0)
6978                                 return ret;
6979                         gtp_item = items;
6980                         last_item = MLX5_FLOW_LAYER_GTP;
6981                         break;
6982                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6983                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
6984                                                             gtp_item, attr,
6985                                                             error);
6986                         if (ret < 0)
6987                                 return ret;
6988                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
6989                         break;
6990                 case RTE_FLOW_ITEM_TYPE_ECPRI:
6991                         /* Capacity will be checked in the translate stage. */
6992                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
6993                                                             last_item,
6994                                                             ether_type,
6995                                                             &nic_ecpri_mask,
6996                                                             error);
6997                         if (ret < 0)
6998                                 return ret;
6999                         last_item = MLX5_FLOW_LAYER_ECPRI;
7000                         break;
7001                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7002                         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
7003                                 return rte_flow_error_set
7004                                         (error, ENOTSUP,
7005                                          RTE_FLOW_ERROR_TYPE_ITEM,
7006                                          NULL, "multiple integrity items not supported");
7007                         ret = flow_dv_validate_item_integrity(dev, rule_items,
7008                                                               items, error);
7009                         if (ret < 0)
7010                                 return ret;
7011                         last_item = MLX5_FLOW_ITEM_INTEGRITY;
7012                         break;
7013                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7014                         ret = flow_dv_validate_item_aso_ct(dev, items,
7015                                                            &item_flags, error);
7016                         if (ret < 0)
7017                                 return ret;
7018                         break;
7019                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7020                         /* The tunnel offload item was processed earlier;
7021                          * it is listed here only as a supported type.
7022                          */
7023                         break;
7024                 default:
7025                         return rte_flow_error_set(error, ENOTSUP,
7026                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7027                                                   NULL, "item not supported");
7028                 }
7029                 item_flags |= last_item;
7030         }
7031         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7032                 int type = actions->type;
7033                 bool shared_count = false;
7034
7035                 if (!mlx5_flow_os_action_supported(type))
7036                         return rte_flow_error_set(error, ENOTSUP,
7037                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7038                                                   actions,
7039                                                   "action not supported");
7040                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7041                         return rte_flow_error_set(error, ENOTSUP,
7042                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7043                                                   actions, "too many actions");
7044                 if (action_flags &
7045                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7046                         return rte_flow_error_set(error, ENOTSUP,
7047                                 RTE_FLOW_ERROR_TYPE_ACTION,
7048                                 NULL, "meter action with policy "
7049                                 "must be the last action");
7050                 switch (type) {
7051                 case RTE_FLOW_ACTION_TYPE_VOID:
7052                         break;
7053                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7054                         ret = flow_dv_validate_action_port_id(dev,
7055                                                               action_flags,
7056                                                               actions,
7057                                                               attr,
7058                                                               error);
7059                         if (ret)
7060                                 return ret;
7061                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7062                         ++actions_n;
7063                         break;
7064                 case RTE_FLOW_ACTION_TYPE_FLAG:
7065                         ret = flow_dv_validate_action_flag(dev, action_flags,
7066                                                            attr, error);
7067                         if (ret < 0)
7068                                 return ret;
7069                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7070                                 /* Count all modify-header actions as one. */
7071                                 if (!(action_flags &
7072                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7073                                         ++actions_n;
7074                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7075                                                 MLX5_FLOW_ACTION_MARK_EXT;
7076                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7077                                         modify_after_mirror = 1;
7078
7079                         } else {
7080                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7081                                 ++actions_n;
7082                         }
7083                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7084                         break;
7085                 case RTE_FLOW_ACTION_TYPE_MARK:
7086                         ret = flow_dv_validate_action_mark(dev, actions,
7087                                                            action_flags,
7088                                                            attr, error);
7089                         if (ret < 0)
7090                                 return ret;
7091                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7092                                 /* Count all modify-header actions as one. */
7093                                 if (!(action_flags &
7094                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7095                                         ++actions_n;
7096                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7097                                                 MLX5_FLOW_ACTION_MARK_EXT;
7098                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7099                                         modify_after_mirror = 1;
7100                         } else {
7101                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7102                                 ++actions_n;
7103                         }
7104                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7105                         break;
7106                 case RTE_FLOW_ACTION_TYPE_SET_META:
7107                         ret = flow_dv_validate_action_set_meta(dev, actions,
7108                                                                action_flags,
7109                                                                attr, error);
7110                         if (ret < 0)
7111                                 return ret;
7112                         /* Count all modify-header actions as one action. */
7113                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7114                                 ++actions_n;
7115                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7116                                 modify_after_mirror = 1;
7117                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7118                         rw_act_num += MLX5_ACT_NUM_SET_META;
7119                         break;
7120                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7121                         ret = flow_dv_validate_action_set_tag(dev, actions,
7122                                                               action_flags,
7123                                                               attr, error);
7124                         if (ret < 0)
7125                                 return ret;
7126                         /* Count all modify-header actions as one action. */
7127                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7128                                 ++actions_n;
7129                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7130                                 modify_after_mirror = 1;
7131                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7132                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7133                         break;
7134                 case RTE_FLOW_ACTION_TYPE_DROP:
7135                         ret = mlx5_flow_validate_action_drop(action_flags,
7136                                                              attr, error);
7137                         if (ret < 0)
7138                                 return ret;
7139                         action_flags |= MLX5_FLOW_ACTION_DROP;
7140                         ++actions_n;
7141                         break;
7142                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7143                         ret = mlx5_flow_validate_action_queue(actions,
7144                                                               action_flags, dev,
7145                                                               attr, error);
7146                         if (ret < 0)
7147                                 return ret;
7148                         queue_index = ((const struct rte_flow_action_queue *)
7149                                                         (actions->conf))->index;
7150                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7151                         ++actions_n;
7152                         break;
7153                 case RTE_FLOW_ACTION_TYPE_RSS:
7154                         rss = actions->conf;
7155                         ret = mlx5_flow_validate_action_rss(actions,
7156                                                             action_flags, dev,
7157                                                             attr, item_flags,
7158                                                             error);
7159                         if (ret < 0)
7160                                 return ret;
7161                         if (rss && sample_rss &&
7162                             (sample_rss->level != rss->level ||
7163                             sample_rss->types != rss->types))
7164                                 return rte_flow_error_set(error, ENOTSUP,
7165                                         RTE_FLOW_ERROR_TYPE_ACTION,
7166                                         NULL,
7167                                         "cannot use different RSS types "
7168                                         "or levels in the same flow");
7169                         if (rss != NULL && rss->queue_num)
7170                                 queue_index = rss->queue[0];
7171                         action_flags |= MLX5_FLOW_ACTION_RSS;
7172                         ++actions_n;
7173                         break;
7174                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7175                         ret =
7176                         mlx5_flow_validate_action_default_miss(action_flags,
7177                                         attr, error);
7178                         if (ret < 0)
7179                                 return ret;
7180                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7181                         ++actions_n;
7182                         break;
7183                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7184                 case RTE_FLOW_ACTION_TYPE_COUNT:
7185                         shared_count = is_shared_action_count(actions);
7186                         ret = flow_dv_validate_action_count(dev, shared_count,
7187                                                             action_flags,
7188                                                             error);
7189                         if (ret < 0)
7190                                 return ret;
7191                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7192                         ++actions_n;
7193                         break;
7194                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7195                         if (flow_dv_validate_action_pop_vlan(dev,
7196                                                              action_flags,
7197                                                              actions,
7198                                                              item_flags, attr,
7199                                                              error))
7200                                 return -rte_errno;
7201                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7202                                 modify_after_mirror = 1;
7203                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7204                         ++actions_n;
7205                         break;
7206                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7207                         ret = flow_dv_validate_action_push_vlan(dev,
7208                                                                 action_flags,
7209                                                                 vlan_m,
7210                                                                 actions, attr,
7211                                                                 error);
7212                         if (ret < 0)
7213                                 return ret;
7214                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7215                                 modify_after_mirror = 1;
7216                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7217                         ++actions_n;
7218                         break;
7219                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7220                         ret = flow_dv_validate_action_set_vlan_pcp
7221                                                 (action_flags, actions, error);
7222                         if (ret < 0)
7223                                 return ret;
7224                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7225                                 modify_after_mirror = 1;
7226                         /* Count PCP with push_vlan command. */
7227                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7228                         break;
7229                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7230                         ret = flow_dv_validate_action_set_vlan_vid
7231                                                 (item_flags, action_flags,
7232                                                  actions, error);
7233                         if (ret < 0)
7234                                 return ret;
7235                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7236                                 modify_after_mirror = 1;
7237                         /* Count VID with push_vlan command. */
7238                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7239                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7240                         break;
7241                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7242                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7243                         ret = flow_dv_validate_action_l2_encap(dev,
7244                                                                action_flags,
7245                                                                actions, attr,
7246                                                                error);
7247                         if (ret < 0)
7248                                 return ret;
7249                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7250                         ++actions_n;
7251                         break;
7252                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7253                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7254                         ret = flow_dv_validate_action_decap(dev, action_flags,
7255                                                             actions, item_flags,
7256                                                             attr, error);
7257                         if (ret < 0)
7258                                 return ret;
7259                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7260                                 modify_after_mirror = 1;
7261                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7262                         ++actions_n;
7263                         break;
7264                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7265                         ret = flow_dv_validate_action_raw_encap_decap
7266                                 (dev, NULL, actions->conf, attr, &action_flags,
7267                                  &actions_n, actions, item_flags, error);
7268                         if (ret < 0)
7269                                 return ret;
7270                         break;
7271                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7272                         decap = actions->conf;
7273                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7274                                 ;
7275                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7276                                 encap = NULL;
7277                                 actions--;
7278                         } else {
7279                                 encap = actions->conf;
7280                         }
7281                         ret = flow_dv_validate_action_raw_encap_decap
7282                                            (dev,
7283                                             decap ? decap : &empty_decap, encap,
7284                                             attr, &action_flags, &actions_n,
7285                                             actions, item_flags, error);
7286                         if (ret < 0)
7287                                 return ret;
7288                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7289                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7290                                 modify_after_mirror = 1;
7291                         break;
7292                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7293                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7294                         ret = flow_dv_validate_action_modify_mac(action_flags,
7295                                                                  actions,
7296                                                                  item_flags,
7297                                                                  error);
7298                         if (ret < 0)
7299                                 return ret;
7300                         /* Count all modify-header actions as one action. */
7301                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7302                                 ++actions_n;
7303                         action_flags |= actions->type ==
7304                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7305                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7306                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7307                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7308                                 modify_after_mirror = 1;
7309                         /*
7310                          * Even if the source and destination MAC addresses
7311                          * overlap in the header with 4B alignment, the convert
7312                          * function handles them separately, so 4 SW actions
7313                          * are created. 2 actions are added each time, no
7314                          * matter how many bytes of the address are set.
7315                          */
7316                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7317                         break;
7318                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7319                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7320                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7321                                                                   actions,
7322                                                                   item_flags,
7323                                                                   error);
7324                         if (ret < 0)
7325                                 return ret;
7326                         /* Count all modify-header actions as one action. */
7327                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7328                                 ++actions_n;
7329                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7330                                 modify_after_mirror = 1;
7331                         action_flags |= actions->type ==
7332                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7333                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7334                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7335                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7336                         break;
7337                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7338                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7339                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7340                                                                   actions,
7341                                                                   item_flags,
7342                                                                   error);
7343                         if (ret < 0)
7344                                 return ret;
7345                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7346                                 return rte_flow_error_set(error, ENOTSUP,
7347                                         RTE_FLOW_ERROR_TYPE_ACTION,
7348                                         actions,
7349                                         "Can't change header "
7350                                         "with ICMPv6 proto");
7351                         /* Count all modify-header actions as one action. */
7352                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7353                                 ++actions_n;
7354                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7355                                 modify_after_mirror = 1;
7356                         action_flags |= actions->type ==
7357                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7358                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7359                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7360                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7361                         break;
7362                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7363                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7364                         ret = flow_dv_validate_action_modify_tp(action_flags,
7365                                                                 actions,
7366                                                                 item_flags,
7367                                                                 error);
7368                         if (ret < 0)
7369                                 return ret;
7370                         /* Count all modify-header actions as one action. */
7371                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7372                                 ++actions_n;
7373                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7374                                 modify_after_mirror = 1;
7375                         action_flags |= actions->type ==
7376                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7377                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7378                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7379                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7380                         break;
7381                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7382                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7383                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7384                                                                  actions,
7385                                                                  item_flags,
7386                                                                  error);
7387                         if (ret < 0)
7388                                 return ret;
7389                         /* Count all modify-header actions as one action. */
7390                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7391                                 ++actions_n;
7392                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7393                                 modify_after_mirror = 1;
7394                         action_flags |= actions->type ==
7395                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7396                                                 MLX5_FLOW_ACTION_SET_TTL :
7397                                                 MLX5_FLOW_ACTION_DEC_TTL;
7398                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7399                         break;
7400                 case RTE_FLOW_ACTION_TYPE_JUMP:
7401                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7402                                                            action_flags,
7403                                                            attr, external,
7404                                                            error);
7405                         if (ret)
7406                                 return ret;
7407                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7408                             fdb_mirror_limit)
7409                                 return rte_flow_error_set(error, EINVAL,
7410                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7411                                                   NULL,
7412                                                   "sample and jump action combination is not supported");
7413                         ++actions_n;
7414                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7415                         break;
7416                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7417                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7418                         ret = flow_dv_validate_action_modify_tcp_seq
7419                                                                 (action_flags,
7420                                                                  actions,
7421                                                                  item_flags,
7422                                                                  error);
7423                         if (ret < 0)
7424                                 return ret;
7425                         /* Count all modify-header actions as one action. */
7426                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7427                                 ++actions_n;
7428                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7429                                 modify_after_mirror = 1;
7430                         action_flags |= actions->type ==
7431                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7432                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7433                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7434                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7435                         break;
7436                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7437                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7438                         ret = flow_dv_validate_action_modify_tcp_ack
7439                                                                 (action_flags,
7440                                                                  actions,
7441                                                                  item_flags,
7442                                                                  error);
7443                         if (ret < 0)
7444                                 return ret;
7445                         /* Count all modify-header actions as one action. */
7446                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7447                                 ++actions_n;
7448                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7449                                 modify_after_mirror = 1;
7450                         action_flags |= actions->type ==
7451                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7452                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7453                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7454                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7455                         break;
7456                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7457                         break;
7458                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7459                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7460                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7461                         break;
7462                 case RTE_FLOW_ACTION_TYPE_METER:
7463                         ret = mlx5_flow_validate_action_meter(dev,
7464                                                               action_flags,
7465                                                               actions, attr,
7466                                                               &def_policy,
7467                                                               error);
7468                         if (ret < 0)
7469                                 return ret;
7470                         action_flags |= MLX5_FLOW_ACTION_METER;
7471                         if (!def_policy)
7472                                 action_flags |=
7473                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7474                         ++actions_n;
7475                         /* Meter action will add one more TAG action. */
7476                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7477                         break;
7478                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7479                         if (!attr->transfer && !attr->group)
7480                                 return rte_flow_error_set(error, ENOTSUP,
7481                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7482                                                                            NULL,
7483                           "Shared ASO age action is not supported for group 0");
7484                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7485                                 return rte_flow_error_set
7486                                                   (error, EINVAL,
7487                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7488                                                    NULL,
7489                                                    "duplicate age actions set");
7490                         action_flags |= MLX5_FLOW_ACTION_AGE;
7491                         ++actions_n;
7492                         break;
7493                 case RTE_FLOW_ACTION_TYPE_AGE:
7494                         ret = flow_dv_validate_action_age(action_flags,
7495                                                           actions, dev,
7496                                                           error);
7497                         if (ret < 0)
7498                                 return ret;
7499                         /*
7500                          * Validate that the regular AGE action (using a counter)
7501                          * is mutually exclusive with shared counter actions.
7502                          */
7503                         if (!priv->sh->flow_hit_aso_en) {
7504                                 if (shared_count)
7505                                         return rte_flow_error_set
7506                                                 (error, EINVAL,
7507                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7508                                                 NULL,
7509                                                 "old age and shared count combination is not supported");
7510                                 if (sample_count)
7511                                         return rte_flow_error_set
7512                                                 (error, EINVAL,
7513                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7514                                                 NULL,
7515                                                 "old age action and count must be in the same sub flow");
7516                         }
7517                         action_flags |= MLX5_FLOW_ACTION_AGE;
7518                         ++actions_n;
7519                         break;
7520                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7521                         ret = flow_dv_validate_action_modify_ipv4_dscp
7522                                                          (action_flags,
7523                                                           actions,
7524                                                           item_flags,
7525                                                           error);
7526                         if (ret < 0)
7527                                 return ret;
7528                         /* Count all modify-header actions as one action. */
7529                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7530                                 ++actions_n;
7531                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7532                                 modify_after_mirror = 1;
7533                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7534                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7535                         break;
7536                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7537                         ret = flow_dv_validate_action_modify_ipv6_dscp
7538                                                                 (action_flags,
7539                                                                  actions,
7540                                                                  item_flags,
7541                                                                  error);
7542                         if (ret < 0)
7543                                 return ret;
7544                         /* Count all modify-header actions as one action. */
7545                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7546                                 ++actions_n;
7547                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7548                                 modify_after_mirror = 1;
7549                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7550                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7551                         break;
7552                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7553                         ret = flow_dv_validate_action_sample(&action_flags,
7554                                                              actions, dev,
7555                                                              attr, item_flags,
7556                                                              rss, &sample_rss,
7557                                                              &sample_count,
7558                                                              &fdb_mirror_limit,
7559                                                              error);
7560                         if (ret < 0)
7561                                 return ret;
7562                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7563                         ++actions_n;
7564                         break;
7565                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7566                         ret = flow_dv_validate_action_modify_field(dev,
7567                                                                    action_flags,
7568                                                                    actions,
7569                                                                    attr,
7570                                                                    error);
7571                         if (ret < 0)
7572                                 return ret;
7573                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7574                                 modify_after_mirror = 1;
7575                         /* Count all modify-header actions as one action. */
7576                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7577                                 ++actions_n;
7578                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7579                         rw_act_num += ret;
7580                         break;
7581                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7582                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7583                                                              item_flags, attr,
7584                                                              error);
7585                         if (ret < 0)
7586                                 return ret;
7587                         action_flags |= MLX5_FLOW_ACTION_CT;
7588                         break;
7589                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7590                         /* Tunnel offload action was processed before;
7591                          * list it here as a supported type.
7592                          */
7593                         break;
7594                 default:
7595                         return rte_flow_error_set(error, ENOTSUP,
7596                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7597                                                   actions,
7598                                                   "action not supported");
7599                 }
7600         }
7601         /*
7602          * Validate actions in tunnel offload flow rules:
7603          * - Explicit decap action is prohibited by the tunnel offload API.
7604          * - Drop action in tunnel steer rule is prohibited by the API.
7605          * - Application cannot use MARK action because its value can mask
7606          *   the tunnel default miss notification.
7607          * - JUMP in tunnel match rule has no support in the current PMD
7608          *   implementation.
7609          * - TAG & META are reserved for future use.
7610          */
7611         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7612                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7613                                             MLX5_FLOW_ACTION_MARK     |
7614                                             MLX5_FLOW_ACTION_SET_TAG  |
7615                                             MLX5_FLOW_ACTION_SET_META |
7616                                             MLX5_FLOW_ACTION_DROP;
7617
7618                 if (action_flags & bad_actions_mask)
7619                         return rte_flow_error_set
7620                                         (error, EINVAL,
7621                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7622                                         "Invalid RTE action in tunnel "
7623                                         "set decap rule");
7624                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7625                         return rte_flow_error_set
7626                                         (error, EINVAL,
7627                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7628                                         "tunnel set decap rule must terminate "
7629                                         "with JUMP");
7630                 if (!attr->ingress)
7631                         return rte_flow_error_set
7632                                         (error, EINVAL,
7633                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7634                                         "tunnel flows for ingress traffic only");
7635         }
7636         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7637                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7638                                             MLX5_FLOW_ACTION_MARK    |
7639                                             MLX5_FLOW_ACTION_SET_TAG |
7640                                             MLX5_FLOW_ACTION_SET_META;
7641
7642                 if (action_flags & bad_actions_mask)
7643                         return rte_flow_error_set
7644                                         (error, EINVAL,
7645                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7646                                         "Invalid RTE action in tunnel "
7647                                         "set match rule");
7648         }
7649         /*
7650          * Validate the drop action mutual exclusion with other actions.
7651          * Drop action is mutually-exclusive with any other action, except for
7652          * Count action.
7653          * Drop action compatibility with tunnel offload was already validated.
7654          */
7655         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7656                             MLX5_FLOW_ACTION_TUNNEL_SET));
7657         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7658             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7659                 return rte_flow_error_set(error, EINVAL,
7660                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7661                                           "Drop action is mutually-exclusive "
7662                                           "with any other action, except for "
7663                                           "Count action");
7664         /* Eswitch has a few restrictions on using items and actions. */
7665         if (attr->transfer) {
7666                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7667                     action_flags & MLX5_FLOW_ACTION_FLAG)
7668                         return rte_flow_error_set(error, ENOTSUP,
7669                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7670                                                   NULL,
7671                                                   "unsupported action FLAG");
7672                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7673                     action_flags & MLX5_FLOW_ACTION_MARK)
7674                         return rte_flow_error_set(error, ENOTSUP,
7675                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7676                                                   NULL,
7677                                                   "unsupported action MARK");
7678                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7679                         return rte_flow_error_set(error, ENOTSUP,
7680                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7681                                                   NULL,
7682                                                   "unsupported action QUEUE");
7683                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7684                         return rte_flow_error_set(error, ENOTSUP,
7685                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7686                                                   NULL,
7687                                                   "unsupported action RSS");
7688                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7689                         return rte_flow_error_set(error, EINVAL,
7690                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7691                                                   actions,
7692                                                   "no fate action is found");
7693         } else {
7694                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7695                         return rte_flow_error_set(error, EINVAL,
7696                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7697                                                   actions,
7698                                                   "no fate action is found");
7699         }
7700         /*
7701          * Continue validation for Xcap and VLAN actions.
7702          * If hairpin is working in explicit TX rule mode, there is no action
7703          * splitting, and the validation of a hairpin ingress flow should be
7704          * the same as for other standard flows.
7705          */
7706         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7707                              MLX5_FLOW_VLAN_ACTIONS)) &&
7708             (queue_index == 0xFFFF ||
7709              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7710              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7711              conf->tx_explicit != 0))) {
7712                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7713                     MLX5_FLOW_XCAP_ACTIONS)
7714                         return rte_flow_error_set(error, ENOTSUP,
7715                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7716                                                   NULL, "encap and decap "
7717                                                   "combination is not supported");
7718                 if (!attr->transfer && attr->ingress) {
7719                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7720                                 return rte_flow_error_set
7721                                                 (error, ENOTSUP,
7722                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7723                                                  NULL, "encap is not supported"
7724                                                  " for ingress traffic");
7725                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7726                                 return rte_flow_error_set
7727                                                 (error, ENOTSUP,
7728                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7729                                                  NULL, "push VLAN action not "
7730                                                  "supported for ingress");
7731                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7732                                         MLX5_FLOW_VLAN_ACTIONS)
7733                                 return rte_flow_error_set
7734                                                 (error, ENOTSUP,
7735                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7736                                                  NULL, "no support for "
7737                                                  "multiple VLAN actions");
7738                 }
7739         }
7740         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7741                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7742                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7743                         attr->ingress)
7744                         return rte_flow_error_set
7745                                 (error, ENOTSUP,
7746                                 RTE_FLOW_ERROR_TYPE_ACTION,
7747                                 NULL, "fate action not supported for "
7748                                 "meter with policy");
7749                 if (attr->egress) {
7750                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7751                                 return rte_flow_error_set
7752                                         (error, ENOTSUP,
7753                                         RTE_FLOW_ERROR_TYPE_ACTION,
7754                                         NULL, "modify header action in egress "
7755                                         "cannot be done before meter action");
7756                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7757                                 return rte_flow_error_set
7758                                         (error, ENOTSUP,
7759                                         RTE_FLOW_ERROR_TYPE_ACTION,
7760                                         NULL, "encap action in egress "
7761                                         "cannot be done before meter action");
7762                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7763                                 return rte_flow_error_set
7764                                         (error, ENOTSUP,
7765                                         RTE_FLOW_ERROR_TYPE_ACTION,
7766                                         NULL, "push vlan action in egress "
7767                                         "cannot be done before meter action");
7768                 }
7769         }
7770         /*
7771          * Hairpin flow will add one more TAG action in TX implicit mode.
7772          * In TX explicit mode, there will be no hairpin flow ID.
7773          */
7774         if (hairpin > 0)
7775                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7776         /* Extra metadata enabled: one more TAG action will be added. */
7777         if (dev_conf->dv_flow_en &&
7778             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7779             mlx5_flow_ext_mreg_supported(dev))
7780                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7781         if (rw_act_num >
7782                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7783                 return rte_flow_error_set(error, ENOTSUP,
7784                                           RTE_FLOW_ERROR_TYPE_ACTION,
7785                                           NULL, "too many header modify"
7786                                           " actions to support");
7787         }
7788         /* Eswitch egress mirror-and-modify flows have a limitation on CX5. */
7789         if (fdb_mirror_limit && modify_after_mirror)
7790                 return rte_flow_error_set(error, EINVAL,
7791                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7792                                 "sample before modify action is not supported");
7793         return 0;
7794 }
7795
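/*
 * Illustration only (guarded out, not part of the PMD): a minimal,
 * hedged sketch of an rte_flow action list that the validator above
 * rejects when fdb_mirror_limit is set (e.g. eswitch egress mirror on
 * CX5), because the SET_IPV4_SRC modify-header action follows SAMPLE
 * and raises modify_after_mirror. The "example_*" names, port ID,
 * ratio and address are arbitrary assumptions; the types are the
 * standard rte_flow API.
 */
#if 0
static const struct rte_flow_action example_sample_sub[] = {
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
static const struct rte_flow_action_sample example_sample = {
        .ratio = 1, /* Mirror every packet. */
        .actions = example_sample_sub,
};
static const struct rte_flow_action_set_ipv4 example_ipv4_src = {
        .ipv4_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
};
static const struct rte_flow_action_port_id example_port = {
        .id = 1,
};
static const struct rte_flow_action example_mirror_then_modify[] = {
        { .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &example_sample },
        { .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
          .conf = &example_ipv4_src },
        { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &example_port },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif
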
7796 /**
7797  * Internal preparation function. Allocates the DV flow size;
7798  * this size is constant.
7799  *
7800  * @param[in] dev
7801  *   Pointer to the rte_eth_dev structure.
7802  * @param[in] attr
7803  *   Pointer to the flow attributes.
7804  * @param[in] items
7805  *   Pointer to the list of items.
7806  * @param[in] actions
7807  *   Pointer to the list of actions.
7808  * @param[out] error
7809  *   Pointer to the error structure.
7810  *
7811  * @return
7812  *   Pointer to mlx5_flow object on success,
7813  *   otherwise NULL and rte_errno is set.
7814  */
7815 static struct mlx5_flow *
7816 flow_dv_prepare(struct rte_eth_dev *dev,
7817                 const struct rte_flow_attr *attr __rte_unused,
7818                 const struct rte_flow_item items[] __rte_unused,
7819                 const struct rte_flow_action actions[] __rte_unused,
7820                 struct rte_flow_error *error)
7821 {
7822         uint32_t handle_idx = 0;
7823         struct mlx5_flow *dev_flow;
7824         struct mlx5_flow_handle *dev_handle;
7825         struct mlx5_priv *priv = dev->data->dev_private;
7826         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7827
7828         MLX5_ASSERT(wks);
7829         wks->skip_matcher_reg = 0;
7830         /* Guard against corrupting the flow workspace memory. */
7831         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7832                 rte_flow_error_set(error, ENOSPC,
7833                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7834                                    "no free temporary device flow");
7835                 return NULL;
7836         }
7837         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7838                                    &handle_idx);
7839         if (!dev_handle) {
7840                 rte_flow_error_set(error, ENOMEM,
7841                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7842                                    "not enough memory to create flow handle");
7843                 return NULL;
7844         }
7845         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7846         dev_flow = &wks->flows[wks->flow_idx++];
7847         memset(dev_flow, 0, sizeof(*dev_flow));
7848         dev_flow->handle = dev_handle;
7849         dev_flow->handle_idx = handle_idx;
7850         /*
7851          * Some old rdma-core releases check the length of the matching
7852          * parameter before continuing, so the length must initially
7853          * exclude the misc4 parameter. If the flow needs misc4 support,
7854          * the length is adjusted accordingly later. Each parameter member
7855          * is naturally aligned to a 64B boundary.
7856          */
7857         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7858                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7859         dev_flow->ingress = attr->ingress;
7860         dev_flow->dv.transfer = attr->transfer;
7861         return dev_flow;
7862 }
7863
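/*
 * Illustration only (guarded out): a hedged sketch of the size
 * arithmetic above, assuming the PRM layout in which every
 * fte_match_set_* member occupies a naturally aligned 64B slot, so the
 * initial match-value length simply drops the trailing misc4 slot.
 * The function name is hypothetical.
 */
#if 0
static void
mlx5_flow_dv_size_sketch(void)
{
        /* Each fte_match_set_* slot is a 64B multiple per the PRM. */
        RTE_BUILD_BUG_ON(MLX5_ST_SZ_BYTES(fte_match_set_misc4) % 64 != 0);
        /* The initial value length excludes only the misc4 slot. */
        RTE_BUILD_BUG_ON(MLX5_ST_SZ_BYTES(fte_match_param) <=
                         MLX5_ST_SZ_BYTES(fte_match_set_misc4));
}
#endif
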
7864 #ifdef RTE_LIBRTE_MLX5_DEBUG
7865 /**
7866  * Sanity check for match mask and value. Similar to check_valid_spec() in
7867  * the kernel driver. It fails if an unmasked bit is present in the value.
7868  *
7869  * @param match_mask
7870  *   pointer to match mask buffer.
7871  * @param match_value
7872  *   pointer to match value buffer.
7873  *
7874  * @return
7875  *   0 if valid, -EINVAL otherwise.
7876  */
7877 static int
7878 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7879 {
7880         uint8_t *m = match_mask;
7881         uint8_t *v = match_value;
7882         unsigned int i;
7883
7884         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7885                 if (v[i] & ~m[i]) {
7886                         DRV_LOG(ERR,
7887                                 "match_value differs from match_criteria"
7888                                 " %p[%u] != %p[%u]",
7889                                 match_value, i, match_mask, i);
7890                         return -EINVAL;
7891                 }
7892         }
7893         return 0;
7894 }
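
/*
 * A hedged worked example of the rule enforced above: with mask byte
 * 0xf0, value 0x15 is invalid (0x15 & ~0xf0 == 0x05, an unmasked bit
 * is set), while value 0x10 is valid (0x10 & ~0xf0 == 0x00).
 */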
7895 #endif
7896
7897 /**
7898  * Add match of ip_version.
7899  *
7900  * @param[in] group
7901  *   Flow group.
7902  * @param[in] headers_v
7903  *   Values header pointer.
7904  * @param[in] headers_m
7905  *   Masks header pointer.
7906  * @param[in] ip_version
7907  *   The IP version to set.
7908  */
7909 static inline void
7910 flow_dv_set_match_ip_version(uint32_t group,
7911                              void *headers_v,
7912                              void *headers_m,
7913                              uint8_t ip_version)
7914 {
7915         if (group == 0)
7916                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7917         else
7918                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7919                          ip_version);
7920         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7921         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7922         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7923 }
7924
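/*
 * Hedged usage note: in the root table (group 0) the ip_version mask
 * is widened to 0xf, so matching IPv4 programs mask 0xf / value 4
 * there, while any non-root group programs mask 4 / value 4. The
 * ethertype match is cleared in both cases, since ip_version
 * supersedes it.
 */
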
7925 /**
7926  * Add Ethernet item to matcher and to the value.
7927  *
7928  * @param[in, out] matcher
7929  *   Flow matcher.
7930  * @param[in, out] key
7931  *   Flow matcher value.
7932  * @param[in] item
7933  *   Flow pattern to translate.
7934  * @param[in] inner
7935  *   Item is inner pattern.
7936  */
7937 static void
7938 flow_dv_translate_item_eth(void *matcher, void *key,
7939                            const struct rte_flow_item *item, int inner,
7940                            uint32_t group)
7941 {
7942         const struct rte_flow_item_eth *eth_m = item->mask;
7943         const struct rte_flow_item_eth *eth_v = item->spec;
7944         const struct rte_flow_item_eth nic_mask = {
7945                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7946                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7947                 .type = RTE_BE16(0xffff),
7948                 .has_vlan = 0,
7949         };
7950         void *hdrs_m;
7951         void *hdrs_v;
7952         char *l24_v;
7953         unsigned int i;
7954
7955         if (!eth_v)
7956                 return;
7957         if (!eth_m)
7958                 eth_m = &nic_mask;
7959         if (inner) {
7960                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7961                                          inner_headers);
7962                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7963         } else {
7964                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7965                                          outer_headers);
7966                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7967         }
7968         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7969                &eth_m->dst, sizeof(eth_m->dst));
7970         /* The value must be in the range of the mask. */
7971         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7972         for (i = 0; i < sizeof(eth_m->dst); ++i)
7973                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7974         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7975                &eth_m->src, sizeof(eth_m->src));
7976         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7977         /* The value must be in the range of the mask. */
7978         for (i = 0; i < sizeof(eth_m->src); ++i)
7979                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7980         /*
7981          * HW supports match on one Ethertype, the Ethertype following the last
7982          * VLAN tag of the packet (see PRM).
7983          * Set match on ethertype only if ETH header is not followed by VLAN.
7984          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7985          * ethertype, and use ip_version field instead.
7986          * eCPRI over Ether layer will use type value 0xAEFE.
7987          */
7988         if (eth_m->type == 0xFFFF) {
7989                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
7990                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7991                 switch (eth_v->type) {
7992                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7993                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7994                         return;
7995                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
7996                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7997                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7998                         return;
7999                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8000                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8001                         return;
8002                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8003                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8004                         return;
8005                 default:
8006                         break;
8007                 }
8008         }
8009         if (eth_m->has_vlan) {
8010                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8011                 if (eth_v->has_vlan) {
8012                         /*
8013                          * Here, when also has_more_vlan field in VLAN item is
8014                          * not set, only single-tagged packets will be matched.
8015                          */
8016                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8017                         return;
8018                 }
8019         }
8020         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8021                  rte_be_to_cpu_16(eth_m->type));
8022         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8023         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8024 }
8025
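/*
 * Illustration only (guarded out): a hedged ETH item with a
 * fully-masked type of IPv4. Per the logic above, the translation uses
 * the ip_version field instead of programming the raw ethertype. The
 * "example_*" names are arbitrary; the types are the standard rte_flow
 * API.
 */
#if 0
static const struct rte_flow_item_eth example_eth_spec = {
        .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item_eth example_eth_mask = {
        .type = RTE_BE16(0xffff),
};
static const struct rte_flow_item example_pattern_eth[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH,
          .spec = &example_eth_spec, .mask = &example_eth_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif
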
8026 /**
8027  * Add VLAN item to matcher and to the value.
8028  *
8029  * @param[in, out] dev_flow
8030  *   Flow descriptor.
8031  * @param[in, out] matcher
8032  *   Flow matcher.
8033  * @param[in, out] key
8034  *   Flow matcher value.
8035  * @param[in] item
8036  *   Flow pattern to translate.
8037  * @param[in] inner
8038  *   Item is inner pattern.
8039  */
8040 static void
8041 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8042                             void *matcher, void *key,
8043                             const struct rte_flow_item *item,
8044                             int inner, uint32_t group)
8045 {
8046         const struct rte_flow_item_vlan *vlan_m = item->mask;
8047         const struct rte_flow_item_vlan *vlan_v = item->spec;
8048         void *hdrs_m;
8049         void *hdrs_v;
8050         uint16_t tci_m;
8051         uint16_t tci_v;
8052
8053         if (inner) {
8054                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8055                                          inner_headers);
8056                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8057         } else {
8058                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8059                                          outer_headers);
8060                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8061                 /*
8062                  * This is a workaround: masks are not supported,
8063                  * and were pre-validated.
8064                  */
8065                 if (vlan_v)
8066                         dev_flow->handle->vf_vlan.tag =
8067                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8068         }
8069         /*
8070          * When a VLAN item exists in the flow, mark the packet as tagged,
8071          * even if TCI is not specified.
8072          */
8073         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8074                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8075                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8076         }
8077         if (!vlan_v)
8078                 return;
8079         if (!vlan_m)
8080                 vlan_m = &rte_flow_item_vlan_mask;
8081         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8082         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8083         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8084         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8085         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8086         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8087         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8088         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8089         /*
8090          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8091          * ethertype, and use ip_version field instead.
8092          */
8093         if (vlan_m->inner_type == 0xFFFF) {
8094                 switch (vlan_v->inner_type) {
8095                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8096                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8097                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8098                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8099                         return;
8100                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8101                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8102                         return;
8103                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8104                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8105                         return;
8106                 default:
8107                         break;
8108                 }
8109         }
8110         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8111                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8112                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8113                 /* Only one vlan_tag bit can be set. */
8114                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8115                 return;
8116         }
8117         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8118                  rte_be_to_cpu_16(vlan_m->inner_type));
8119         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8120                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8121 }
8122
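/*
 * Illustration only (guarded out): a hedged VLAN item matching
 * VLAN ID 100 with a full 12-bit VID mask. Per the TCI handling above
 * this programs first_vid, and cvlan_tag is set even when TCI is not
 * specified at all. The "example_*" names are arbitrary.
 */
#if 0
static const struct rte_flow_item_vlan example_vlan_spec = {
        .tci = RTE_BE16(100),
};
static const struct rte_flow_item_vlan example_vlan_mask = {
        .tci = RTE_BE16(0x0fff),
};
#endif
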
8123 /**
8124  * Add IPV4 item to matcher and to the value.
8125  *
8126  * @param[in, out] matcher
8127  *   Flow matcher.
8128  * @param[in, out] key
8129  *   Flow matcher value.
8130  * @param[in] item
8131  *   Flow pattern to translate.
8132  * @param[in] inner
8133  *   Item is inner pattern.
8134  * @param[in] group
8135  *   The group to insert the rule.
8136  */
8137 static void
8138 flow_dv_translate_item_ipv4(void *matcher, void *key,
8139                             const struct rte_flow_item *item,
8140                             int inner, uint32_t group)
8141 {
8142         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8143         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8144         const struct rte_flow_item_ipv4 nic_mask = {
8145                 .hdr = {
8146                         .src_addr = RTE_BE32(0xffffffff),
8147                         .dst_addr = RTE_BE32(0xffffffff),
8148                         .type_of_service = 0xff,
8149                         .next_proto_id = 0xff,
8150                         .time_to_live = 0xff,
8151                 },
8152         };
8153         void *headers_m;
8154         void *headers_v;
8155         char *l24_m;
8156         char *l24_v;
8157         uint8_t tos;
8158
8159         if (inner) {
8160                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8161                                          inner_headers);
8162                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8163         } else {
8164                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8165                                          outer_headers);
8166                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8167         }
8168         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8169         if (!ipv4_v)
8170                 return;
8171         if (!ipv4_m)
8172                 ipv4_m = &nic_mask;
8173         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8174                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8175         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8176                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8177         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8178         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8179         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8180                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8181         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8182                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8183         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8184         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8185         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8186         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8187                  ipv4_m->hdr.type_of_service);
8188         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8189         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8190                  ipv4_m->hdr.type_of_service >> 2);
8191         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8192         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8193                  ipv4_m->hdr.next_proto_id);
8194         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8195                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8196         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8197                  ipv4_m->hdr.time_to_live);
8198         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8199                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8200         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8201                  !!(ipv4_m->hdr.fragment_offset));
8202         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8203                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8204 }
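
/*
 * Worked example (editorial, hypothetical values): every translator in
 * this file follows the same convention - the matcher side receives the
 * raw item mask and the key side receives the value already AND-ed with
 * that mask. For an IPv4 destination of 10.0.0.1/24:
 *
 *     spec.hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1));
 *     mask.hdr.dst_addr = RTE_BE32(0xffffff00);
 *
 * the matcher gets 0xffffff00 and the key gets
 * 10.0.0.1 & 255.255.255.0 == 10.0.0.0, so hardware never compares
 * bits outside the mask.
 */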
8205
8206 /**
8207  * Add IPV6 item to matcher and to the value.
8208  *
8209  * @param[in, out] matcher
8210  *   Flow matcher.
8211  * @param[in, out] key
8212  *   Flow matcher value.
8213  * @param[in] item
8214  *   Flow pattern to translate.
8215  * @param[in] inner
8216  *   Item is inner pattern.
8217  * @param[in] group
8218  *   The group to insert the rule.
8219  */
8220 static void
8221 flow_dv_translate_item_ipv6(void *matcher, void *key,
8222                             const struct rte_flow_item *item,
8223                             int inner, uint32_t group)
8224 {
8225         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8226         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8227         const struct rte_flow_item_ipv6 nic_mask = {
8228                 .hdr = {
8229                         .src_addr =
8230                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8231                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8232                         .dst_addr =
8233                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8234                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8235                         .vtc_flow = RTE_BE32(0xffffffff),
8236                         .proto = 0xff,
8237                         .hop_limits = 0xff,
8238                 },
8239         };
8240         void *headers_m;
8241         void *headers_v;
8242         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8243         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8244         char *l24_m;
8245         char *l24_v;
8246         uint32_t vtc_m;
8247         uint32_t vtc_v;
8248         int i;
8249         int size;
8250
8251         if (inner) {
8252                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8253                                          inner_headers);
8254                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8255         } else {
8256                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8257                                          outer_headers);
8258                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8259         }
8260         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8261         if (!ipv6_v)
8262                 return;
8263         if (!ipv6_m)
8264                 ipv6_m = &nic_mask;
8265         size = sizeof(ipv6_m->hdr.dst_addr);
8266         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8267                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8268         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8269                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8270         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8271         for (i = 0; i < size; ++i)
8272                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8273         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8274                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8275         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8276                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8277         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8278         for (i = 0; i < size; ++i)
8279                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8280         /* TOS. */
8281         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8282         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8283         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8284         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8285         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8286         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8287         /* Label. */
8288         if (inner) {
8289                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8290                          vtc_m);
8291                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8292                          vtc_v);
8293         } else {
8294                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8295                          vtc_m);
8296                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8297                          vtc_v);
8298         }
8299         /* Protocol. */
8300         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8301                  ipv6_m->hdr.proto);
8302         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8303                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8304         /* Hop limit. */
8305         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8306                  ipv6_m->hdr.hop_limits);
8307         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8308                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8309         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8310                  !!(ipv6_m->has_frag_ext));
8311         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8312                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8313 }
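
/*
 * Illustration (editorial): after rte_be_to_cpu_32(), vtc_flow is
 * version(31:28) | traffic class(27:20) | flow label(19:0), and
 * MLX5_SET() truncates to the destination field width, hence:
 *
 *     ip_ecn  <- (vtc >> 20) & 0x3    low 2 TC bits (ECN)
 *     ip_dscp <- (vtc >> 22) & 0x3f   high 6 TC bits (DSCP)
 *     label   <- vtc & 0xfffff        20-bit flow label
 *
 * e.g. vtc_flow == 0x6b8f00ff yields DSCP 46 (EF), ECN 0 and flow
 * label 0xf00ff.
 */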
8314
8315 /**
8316  * Add IPV6 fragment extension item to matcher and to the value.
8317  *
8318  * @param[in, out] matcher
8319  *   Flow matcher.
8320  * @param[in, out] key
8321  *   Flow matcher value.
8322  * @param[in] item
8323  *   Flow pattern to translate.
8324  * @param[in] inner
8325  *   Item is inner pattern.
8326  */
8327 static void
8328 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8329                                      const struct rte_flow_item *item,
8330                                      int inner)
8331 {
8332         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8333         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8334         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8335                 .hdr = {
8336                         .next_header = 0xff,
8337                         .frag_data = RTE_BE16(0xffff),
8338                 },
8339         };
8340         void *headers_m;
8341         void *headers_v;
8342
8343         if (inner) {
8344                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8345                                          inner_headers);
8346                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8347         } else {
8348                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8349                                          outer_headers);
8350                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8351         }
8352         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8353         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8354         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8355         if (!ipv6_frag_ext_v)
8356                 return;
8357         if (!ipv6_frag_ext_m)
8358                 ipv6_frag_ext_m = &nic_mask;
8359         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8360                  ipv6_frag_ext_m->hdr.next_header);
8361         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8362                  ipv6_frag_ext_v->hdr.next_header &
8363                  ipv6_frag_ext_m->hdr.next_header);
8364 }
8365
8366 /**
8367  * Add TCP item to matcher and to the value.
8368  *
8369  * @param[in, out] matcher
8370  *   Flow matcher.
8371  * @param[in, out] key
8372  *   Flow matcher value.
8373  * @param[in] item
8374  *   Flow pattern to translate.
8375  * @param[in] inner
8376  *   Item is inner pattern.
8377  */
8378 static void
8379 flow_dv_translate_item_tcp(void *matcher, void *key,
8380                            const struct rte_flow_item *item,
8381                            int inner)
8382 {
8383         const struct rte_flow_item_tcp *tcp_m = item->mask;
8384         const struct rte_flow_item_tcp *tcp_v = item->spec;
8385         void *headers_m;
8386         void *headers_v;
8387
8388         if (inner) {
8389                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8390                                          inner_headers);
8391                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8392         } else {
8393                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8394                                          outer_headers);
8395                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8396         }
8397         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8398         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8399         if (!tcp_v)
8400                 return;
8401         if (!tcp_m)
8402                 tcp_m = &rte_flow_item_tcp_mask;
8403         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8404                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8405         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8406                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8407         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8408                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8409         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8410                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8411         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8412                  tcp_m->hdr.tcp_flags);
8413         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8414                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8415 }
8416
8417 /**
8418  * Add UDP item to matcher and to the value.
8419  *
8420  * @param[in, out] matcher
8421  *   Flow matcher.
8422  * @param[in, out] key
8423  *   Flow matcher value.
8424  * @param[in] item
8425  *   Flow pattern to translate.
8426  * @param[in] inner
8427  *   Item is inner pattern.
8428  */
8429 static void
8430 flow_dv_translate_item_udp(void *matcher, void *key,
8431                            const struct rte_flow_item *item,
8432                            int inner)
8433 {
8434         const struct rte_flow_item_udp *udp_m = item->mask;
8435         const struct rte_flow_item_udp *udp_v = item->spec;
8436         void *headers_m;
8437         void *headers_v;
8438
8439         if (inner) {
8440                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8441                                          inner_headers);
8442                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8443         } else {
8444                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8445                                          outer_headers);
8446                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8447         }
8448         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8449         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8450         if (!udp_v)
8451                 return;
8452         if (!udp_m)
8453                 udp_m = &rte_flow_item_udp_mask;
8454         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8455                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8456         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8457                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8458         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8459                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8460         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8461                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8462 }
8463
8464 /**
8465  * Add GRE optional Key item to matcher and to the value.
8466  *
8467  * @param[in, out] matcher
8468  *   Flow matcher.
8469  * @param[in, out] key
8470  *   Flow matcher value.
8471  * @param[in] item
8472  *   Flow pattern to translate.
8475  */
8476 static void
8477 flow_dv_translate_item_gre_key(void *matcher, void *key,
8478                                    const struct rte_flow_item *item)
8479 {
8480         const rte_be32_t *key_m = item->mask;
8481         const rte_be32_t *key_v = item->spec;
8482         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8483         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8484         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8485
8486         /* GRE K bit must be on and should already be validated */
8487         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8488         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8489         if (!key_v)
8490                 return;
8491         if (!key_m)
8492                 key_m = &gre_key_default_mask;
8493         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8494                  rte_be_to_cpu_32(*key_m) >> 8);
8495         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8496                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8497         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8498                  rte_be_to_cpu_32(*key_m) & 0xFF);
8499         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8500                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8501 }
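
/*
 * Illustration (editorial): the 32-bit GRE key is split across two
 * match fields, gre_key_h holding the upper 24 bits and gre_key_l the
 * lower 8, e.g. a key of 0x11223344 is programmed as
 * gre_key_h = 0x112233 and gre_key_l = 0x44.
 */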
8502
8503 /**
8504  * Add GRE item to matcher and to the value.
8505  *
8506  * @param[in, out] matcher
8507  *   Flow matcher.
8508  * @param[in, out] key
8509  *   Flow matcher value.
8510  * @param[in] item
8511  *   Flow pattern to translate.
8512  * @param[in] inner
8513  *   Item is inner pattern.
8514  */
8515 static void
8516 flow_dv_translate_item_gre(void *matcher, void *key,
8517                            const struct rte_flow_item *item,
8518                            int inner)
8519 {
8520         const struct rte_flow_item_gre *gre_m = item->mask;
8521         const struct rte_flow_item_gre *gre_v = item->spec;
8522         void *headers_m;
8523         void *headers_v;
8524         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8525         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8526         struct {
8527                 union {
8528                         __extension__
8529                         struct {
8530                                 uint16_t version:3;
8531                                 uint16_t rsvd0:9;
8532                                 uint16_t s_present:1;
8533                                 uint16_t k_present:1;
8534                                 uint16_t rsvd_bit1:1;
8535                                 uint16_t c_present:1;
8536                         };
8537                         uint16_t value;
8538                 };
8539         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8540
8541         if (inner) {
8542                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8543                                          inner_headers);
8544                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8545         } else {
8546                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8547                                          outer_headers);
8548                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8549         }
8550         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8551         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8552         if (!gre_v)
8553                 return;
8554         if (!gre_m)
8555                 gre_m = &rte_flow_item_gre_mask;
8556         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8557                  rte_be_to_cpu_16(gre_m->protocol));
8558         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8559                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8560         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8561         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8562         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8563                  gre_crks_rsvd0_ver_m.c_present);
8564         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8565                  gre_crks_rsvd0_ver_v.c_present &
8566                  gre_crks_rsvd0_ver_m.c_present);
8567         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8568                  gre_crks_rsvd0_ver_m.k_present);
8569         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8570                  gre_crks_rsvd0_ver_v.k_present &
8571                  gre_crks_rsvd0_ver_m.k_present);
8572         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8573                  gre_crks_rsvd0_ver_m.s_present);
8574         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8575                  gre_crks_rsvd0_ver_v.s_present &
8576                  gre_crks_rsvd0_ver_m.s_present);
8577 }
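
/*
 * Decoding note (editorial): the bitfield overlay above maps the
 * host-order c_rsvd0_ver word so that version occupies bits 2:0 and
 * C/K/S sit in bits 15/13/12, e.g. 0x2000 sets only k_present while a
 * mask of 0xb000 covers the C, K and S bits - exactly the spec/mask
 * pair the NVGRE translator below passes in.
 */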
8578
8579 /**
8580  * Add NVGRE item to matcher and to the value.
8581  *
8582  * @param[in, out] matcher
8583  *   Flow matcher.
8584  * @param[in, out] key
8585  *   Flow matcher value.
8586  * @param[in] item
8587  *   Flow pattern to translate.
8588  * @param[in] inner
8589  *   Item is inner pattern.
8590  */
8591 static void
8592 flow_dv_translate_item_nvgre(void *matcher, void *key,
8593                              const struct rte_flow_item *item,
8594                              int inner)
8595 {
8596         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8597         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8598         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8599         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8600         const char *tni_flow_id_m;
8601         const char *tni_flow_id_v;
8602         char *gre_key_m;
8603         char *gre_key_v;
8604         int size;
8605         int i;
8606
8607         /* For NVGRE, GRE header fields must be set with defined values. */
8608         const struct rte_flow_item_gre gre_spec = {
8609                 .c_rsvd0_ver = RTE_BE16(0x2000),
8610                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8611         };
8612         const struct rte_flow_item_gre gre_mask = {
8613                 .c_rsvd0_ver = RTE_BE16(0xB000),
8614                 .protocol = RTE_BE16(UINT16_MAX),
8615         };
8616         const struct rte_flow_item gre_item = {
8617                 .spec = &gre_spec,
8618                 .mask = &gre_mask,
8619                 .last = NULL,
8620         };
8621         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8622         if (!nvgre_v)
8623                 return;
8624         if (!nvgre_m)
8625                 nvgre_m = &rte_flow_item_nvgre_mask;
8626         tni_flow_id_m = (const char *)nvgre_m->tni;
8627         tni_flow_id_v = (const char *)nvgre_v->tni;
8628         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8629         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8630         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8631         memcpy(gre_key_m, tni_flow_id_m, size);
8632         for (i = 0; i < size; ++i)
8633                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8634 }
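
/*
 * Illustration (editorial): NVGRE reuses the GRE key fields - the
 * 24-bit TNI and the 8-bit flow_id are copied as one 32-bit value
 * starting at gre_key_h, so TNI {0x12, 0x34, 0x56} with flow_id 0x78
 * matches the same bits as a GRE key of 0x12345678.
 */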
8635
8636 /**
8637  * Add VXLAN item to matcher and to the value.
8638  *
8639  * @param[in, out] matcher
8640  *   Flow matcher.
8641  * @param[in, out] key
8642  *   Flow matcher value.
8643  * @param[in] item
8644  *   Flow pattern to translate.
8645  * @param[in] inner
8646  *   Item is inner pattern.
8647  */
8648 static void
8649 flow_dv_translate_item_vxlan(void *matcher, void *key,
8650                              const struct rte_flow_item *item,
8651                              int inner)
8652 {
8653         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8654         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8655         void *headers_m;
8656         void *headers_v;
8657         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8658         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8659         char *vni_m;
8660         char *vni_v;
8661         uint16_t dport;
8662         int size;
8663         int i;
8664
8665         if (inner) {
8666                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8667                                          inner_headers);
8668                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8669         } else {
8670                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8671                                          outer_headers);
8672                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8673         }
8674         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8675                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8676         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8677                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8678                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8679         }
8680         if (!vxlan_v)
8681                 return;
8682         if (!vxlan_m)
8683                 vxlan_m = &rte_flow_item_vxlan_mask;
8684         size = sizeof(vxlan_m->vni);
8685         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8686         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8687         memcpy(vni_m, vxlan_m->vni, size);
8688         for (i = 0; i < size; ++i)
8689                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8690 }
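
/*
 * Note (editorial): the default tunnel UDP destination port
 * (MLX5_UDP_PORT_VXLAN) is forced only when the preceding UDP item
 * left udp_dport at zero (the MLX5_GET16() check above), so an
 * explicit non-standard port in the pattern takes precedence.
 */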
8691
8692 /**
8693  * Add VXLAN-GPE item to matcher and to the value.
8694  *
8695  * @param[in, out] matcher
8696  *   Flow matcher.
8697  * @param[in, out] key
8698  *   Flow matcher value.
8699  * @param[in] item
8700  *   Flow pattern to translate.
8701  * @param[in] inner
8702  *   Item is inner pattern.
8703  */
8705 static void
8706 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8707                                  const struct rte_flow_item *item, int inner)
8708 {
8709         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8710         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8711         void *headers_m;
8712         void *headers_v;
8713         void *misc_m =
8714                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8715         void *misc_v =
8716                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8717         char *vni_m;
8718         char *vni_v;
8719         uint16_t dport;
8720         int size;
8721         int i;
8722         uint8_t flags_m = 0xff;
8723         uint8_t flags_v = 0xc;
8724
8725         if (inner) {
8726                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8727                                          inner_headers);
8728                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8729         } else {
8730                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8731                                          outer_headers);
8732                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8733         }
8734         /* This translator handles VXLAN-GPE items only. */
8735         dport = MLX5_UDP_PORT_VXLAN_GPE;
8736         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8737                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8738                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8739         }
8740         if (!vxlan_v)
8741                 return;
8742         if (!vxlan_m)
8743                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8744         size = sizeof(vxlan_m->vni);
8745         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8746         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8747         memcpy(vni_m, vxlan_m->vni, size);
8748         for (i = 0; i < size; ++i)
8749                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8750         if (vxlan_m->flags) {
8751                 flags_m = vxlan_m->flags;
8752                 flags_v = vxlan_v->flags;
8753         }
8754         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8755         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8756         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8757                  vxlan_m->protocol);
8758         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8759                  vxlan_v->protocol);
8760 }
8761
8762 /**
8763  * Add Geneve item to matcher and to the value.
8764  *
8765  * @param[in, out] matcher
8766  *   Flow matcher.
8767  * @param[in, out] key
8768  *   Flow matcher value.
8769  * @param[in] item
8770  *   Flow pattern to translate.
8771  * @param[in] inner
8772  *   Item is inner pattern.
8773  */
8775 static void
8776 flow_dv_translate_item_geneve(void *matcher, void *key,
8777                               const struct rte_flow_item *item, int inner)
8778 {
8779         const struct rte_flow_item_geneve *geneve_m = item->mask;
8780         const struct rte_flow_item_geneve *geneve_v = item->spec;
8781         void *headers_m;
8782         void *headers_v;
8783         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8784         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8785         uint16_t dport;
8786         uint16_t gbhdr_m;
8787         uint16_t gbhdr_v;
8788         char *vni_m;
8789         char *vni_v;
8790         size_t size, i;
8791
8792         if (inner) {
8793                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8794                                          inner_headers);
8795                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8796         } else {
8797                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8798                                          outer_headers);
8799                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8800         }
8801         dport = MLX5_UDP_PORT_GENEVE;
8802         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8803                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8804                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8805         }
8806         if (!geneve_v)
8807                 return;
8808         if (!geneve_m)
8809                 geneve_m = &rte_flow_item_geneve_mask;
8810         size = sizeof(geneve_m->vni);
8811         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8812         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8813         memcpy(vni_m, geneve_m->vni, size);
8814         for (i = 0; i < size; ++i)
8815                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8816         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8817                  rte_be_to_cpu_16(geneve_m->protocol));
8818         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8819                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8820         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8821         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8822         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8823                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8824         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8825                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8826         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8827                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8828         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8829                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8830                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8831 }
8832
8833 /**
8834  * Create Geneve TLV option resource.
8835  *
8836  * @param[in, out] dev
8837  *   Pointer to rte_eth_dev structure.
8838  * @param[in] item
8839  *   Flow pattern to translate (GENEVE TLV option item).
8842  * @param[out] error
8843  *   Pointer to error structure.
8844  *
8845  * @return
8846  *   0 on success, a negative errno value otherwise and rte_errno is set.
8847  */
8849 int
8850 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8851                                              const struct rte_flow_item *item,
8852                                              struct rte_flow_error *error)
8853 {
8854         struct mlx5_priv *priv = dev->data->dev_private;
8855         struct mlx5_dev_ctx_shared *sh = priv->sh;
8856         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8857                         sh->geneve_tlv_option_resource;
8858         struct mlx5_devx_obj *obj;
8859         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8860         int ret = 0;
8861
8862         if (!geneve_opt_v)
8863                 return -1;
8864         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8865         if (geneve_opt_resource != NULL) {
8866                 if (geneve_opt_resource->option_class ==
8867                         geneve_opt_v->option_class &&
8868                         geneve_opt_resource->option_type ==
8869                         geneve_opt_v->option_type &&
8870                         geneve_opt_resource->length ==
8871                         geneve_opt_v->option_len) {
8872                         /* We already have GENEVE TLV option obj allocated. */
8873                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8874                                            __ATOMIC_RELAXED);
8875                 } else {
8876                         ret = rte_flow_error_set(error, ENOMEM,
8877                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8878                                 "Only one GENEVE TLV option supported");
8879                         goto exit;
8880                 }
8881         } else {
8882                 /* Create a GENEVE TLV object and resource. */
8883                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8884                                 geneve_opt_v->option_class,
8885                                 geneve_opt_v->option_type,
8886                                 geneve_opt_v->option_len);
8887                 if (!obj) {
8888                         ret = rte_flow_error_set(error, ENODATA,
8889                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8890                                 "Failed to create GENEVE TLV Devx object");
8891                         goto exit;
8892                 }
8893                 sh->geneve_tlv_option_resource =
8894                                 mlx5_malloc(MLX5_MEM_ZERO,
8895                                                 sizeof(*geneve_opt_resource),
8896                                                 0, SOCKET_ID_ANY);
8897                 if (!sh->geneve_tlv_option_resource) {
8898                         claim_zero(mlx5_devx_cmd_destroy(obj));
8899                         ret = rte_flow_error_set(error, ENOMEM,
8900                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8901                                 "GENEVE TLV object memory allocation failed");
8902                         goto exit;
8903                 }
8904                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8905                 geneve_opt_resource->obj = obj;
8906                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8907                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8908                 geneve_opt_resource->length = geneve_opt_v->option_len;
8909                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8910                                 __ATOMIC_RELAXED);
8911         }
8912 exit:
8913         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8914         return ret;
8915 }
8916
8917 /**
8918  * Add Geneve TLV option item to matcher.
8919  *
8920  * @param[in, out] dev
8921  *   Pointer to rte_eth_dev structure.
8922  * @param[in, out] matcher
8923  *   Flow matcher.
8924  * @param[in, out] key
8925  *   Flow matcher value.
8926  * @param[in] item
8927  *   Flow pattern to translate.
8928  * @param[out] error
8929  *   Pointer to error structure.
8930  */
8931 static int
8932 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8933                                   void *key, const struct rte_flow_item *item,
8934                                   struct rte_flow_error *error)
8935 {
8936         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8937         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8938         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8939         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8940         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8941                         misc_parameters_3);
8942         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8943         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8944         int ret = 0;
8945
8946         if (!geneve_opt_v)
8947                 return -1;
8948         if (!geneve_opt_m)
8949                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8950         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8951                                                            error);
8952         if (ret) {
8953                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8954                 return ret;
8955         }
8956         /*
8957          * Set the option length in GENEVE header if not requested.
8958          * The GENEVE TLV option length is expressed by the option length field
8959          * in the GENEVE header.
8960          * If the option length was not requested but the GENEVE TLV option item
8961          * is present we set the option length field implicitly.
8962          */
8963         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8964                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8965                          MLX5_GENEVE_OPTLEN_MASK);
8966                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8967                          geneve_opt_v->option_len + 1);
8968         }
8969         /* Set the data. */
8970         if (geneve_opt_v->data) {
8971                 memcpy(&opt_data_key, geneve_opt_v->data,
8972                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8973                                 sizeof(opt_data_key)));
8974                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8975                                 sizeof(opt_data_key));
8976                 memcpy(&opt_data_mask, geneve_opt_m->data,
8977                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8978                                 sizeof(opt_data_mask)));
8979                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8980                                 sizeof(opt_data_mask));
8981                 MLX5_SET(fte_match_set_misc3, misc3_m,
8982                                 geneve_tlv_option_0_data,
8983                                 rte_be_to_cpu_32(opt_data_mask));
8984                 MLX5_SET(fte_match_set_misc3, misc3_v,
8985                                 geneve_tlv_option_0_data,
8986                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8987         }
8988         return ret;
8989 }
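
/*
 * Note (editorial assumption about GENEVE framing): the
 * "option_len + 1" above reflects that the header opt_len field counts
 * the whole option in 4-byte words including the 4-byte option header,
 * while the rte_flow item option_len counts only the data words.
 */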
8990
8991 /**
8992  * Add MPLS item to matcher and to the value.
8993  *
8994  * @param[in, out] matcher
8995  *   Flow matcher.
8996  * @param[in, out] key
8997  *   Flow matcher value.
8998  * @param[in] item
8999  *   Flow pattern to translate.
9000  * @param[in] prev_layer
9001  *   The protocol layer indicated in previous item.
9002  * @param[in] inner
9003  *   Item is inner pattern.
9004  */
9005 static void
9006 flow_dv_translate_item_mpls(void *matcher, void *key,
9007                             const struct rte_flow_item *item,
9008                             uint64_t prev_layer,
9009                             int inner)
9010 {
9011         const uint32_t *in_mpls_m = item->mask;
9012         const uint32_t *in_mpls_v = item->spec;
9013         uint32_t *out_mpls_m = NULL;
9014         uint32_t *out_mpls_v = NULL;
9015         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9016         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9017         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9018                                      misc_parameters_2);
9019         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9020         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9021         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9022
9023         switch (prev_layer) {
9024         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9025                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9026                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9027                          MLX5_UDP_PORT_MPLS);
9028                 break;
9029         case MLX5_FLOW_LAYER_GRE:
9030                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9031                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9032                          RTE_ETHER_TYPE_MPLS);
9033                 break;
9034         default:
9035                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
9036                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
9037                          IPPROTO_MPLS);
9038                 break;
9039         }
9040         if (!in_mpls_v)
9041                 return;
9042         if (!in_mpls_m)
9043                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9044         switch (prev_layer) {
9045         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9046                 out_mpls_m =
9047                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9048                                                  outer_first_mpls_over_udp);
9049                 out_mpls_v =
9050                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9051                                                  outer_first_mpls_over_udp);
9052                 break;
9053         case MLX5_FLOW_LAYER_GRE:
9054                 out_mpls_m =
9055                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9056                                                  outer_first_mpls_over_gre);
9057                 out_mpls_v =
9058                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9059                                                  outer_first_mpls_over_gre);
9060                 break;
9061         default:
9062                 /* Inner MPLS not over GRE is not supported. */
9063                 if (!inner) {
9064                         out_mpls_m =
9065                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9066                                                          misc2_m,
9067                                                          outer_first_mpls);
9068                         out_mpls_v =
9069                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9070                                                          misc2_v,
9071                                                          outer_first_mpls);
9072                 }
9073                 break;
9074         }
9075         if (out_mpls_m && out_mpls_v) {
9076                 *out_mpls_m = *in_mpls_m;
9077                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9078         }
9079 }
9080
9081 /**
9082  * Add metadata register item to matcher
9083  *
9084  * @param[in, out] matcher
9085  *   Flow matcher.
9086  * @param[in, out] key
9087  *   Flow matcher value.
9088  * @param[in] reg_type
9089  *   Type of device metadata register
9090  * @param[in] data
9091  *   Register value
9092  * @param[in] mask
9093  *   Register mask
9094  */
9095 static void
9096 flow_dv_match_meta_reg(void *matcher, void *key,
9097                        enum modify_reg reg_type,
9098                        uint32_t data, uint32_t mask)
9099 {
9100         void *misc2_m =
9101                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9102         void *misc2_v =
9103                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9104         uint32_t temp;
9105
9106         data &= mask;
9107         switch (reg_type) {
9108         case REG_A:
9109                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9110                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9111                 break;
9112         case REG_B:
9113                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9114                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9115                 break;
9116         case REG_C_0:
9117                 /*
9118                  * The metadata register C0 field might be divided into
9119                  * source vport index and META item value, we should set
9120                  * this field according to specified mask, not as whole one.
9121                  */
9122                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9123                 temp |= mask;
9124                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9125                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9126                 temp &= ~mask;
9127                 temp |= data;
9128                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9129                 break;
9130         case REG_C_1:
9131                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9132                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9133                 break;
9134         case REG_C_2:
9135                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9136                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9137                 break;
9138         case REG_C_3:
9139                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9140                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9141                 break;
9142         case REG_C_4:
9143                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9144                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9145                 break;
9146         case REG_C_5:
9147                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9148                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9149                 break;
9150         case REG_C_6:
9151                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9152                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9153                 break;
9154         case REG_C_7:
9155                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9156                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9157                 break;
9158         default:
9159                 MLX5_ASSERT(false);
9160                 break;
9161         }
9162 }
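
/*
 * Worked example (editorial, hypothetical masks): the REG_C_0 case
 * above merges instead of overwriting. If the vport match already
 * programmed mask 0xffff0000, a later META/MARK match with mask
 * 0x0000ffff results in:
 *
 *     matcher mask: 0xffff0000 | 0x0000ffff == 0xffffffff
 *     key value:    (old & ~0x0000ffff) | data
 *
 * so the source-vport bits and the application bits coexist in the
 * same register.
 */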
9163
9164 /**
9165  * Add MARK item to matcher
9166  *
9167  * @param[in] dev
9168  *   The device to configure through.
9169  * @param[in, out] matcher
9170  *   Flow matcher.
9171  * @param[in, out] key
9172  *   Flow matcher value.
9173  * @param[in] item
9174  *   Flow pattern to translate.
9175  */
9176 static void
9177 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9178                             void *matcher, void *key,
9179                             const struct rte_flow_item *item)
9180 {
9181         struct mlx5_priv *priv = dev->data->dev_private;
9182         const struct rte_flow_item_mark *mark;
9183         uint32_t value;
9184         uint32_t mask;
9185
9186         mark = item->mask ? (const void *)item->mask :
9187                             &rte_flow_item_mark_mask;
9188         mask = mark->id & priv->sh->dv_mark_mask;
9189         mark = (const void *)item->spec;
9190         MLX5_ASSERT(mark);
9191         value = mark->id & priv->sh->dv_mark_mask & mask;
9192         if (mask) {
9193                 enum modify_reg reg;
9194
9195                 /* Get the metadata register index for the mark. */
9196                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9197                 MLX5_ASSERT(reg > 0);
9198                 if (reg == REG_C_0) {
9200                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9201                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9202
9203                         mask &= msk_c0;
9204                         mask <<= shl_c0;
9205                         value <<= shl_c0;
9206                 }
9207                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9208         }
9209 }
9210
9211 /**
9212  * Add META item to matcher
9213  *
9214  * @param[in] dev
9215  *   The device to configure through.
9216  * @param[in, out] matcher
9217  *   Flow matcher.
9218  * @param[in, out] key
9219  *   Flow matcher value.
9220  * @param[in] attr
9221  *   Attributes of flow that includes this item.
9222  * @param[in] item
9223  *   Flow pattern to translate.
9224  */
9225 static void
9226 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9227                             void *matcher, void *key,
9228                             const struct rte_flow_attr *attr,
9229                             const struct rte_flow_item *item)
9230 {
9231         const struct rte_flow_item_meta *meta_m;
9232         const struct rte_flow_item_meta *meta_v;
9233
9234         meta_m = (const void *)item->mask;
9235         if (!meta_m)
9236                 meta_m = &rte_flow_item_meta_mask;
9237         meta_v = (const void *)item->spec;
9238         if (meta_v) {
9239                 int reg;
9240                 uint32_t value = meta_v->data;
9241                 uint32_t mask = meta_m->data;
9242
9243                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9244                 if (reg < 0)
9245                         return;
9246                 MLX5_ASSERT(reg != REG_NON);
9247                 if (reg == REG_C_0) {
9248                         struct mlx5_priv *priv = dev->data->dev_private;
9249                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9250                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9251
9252                         mask &= msk_c0;
9253                         mask <<= shl_c0;
9254                         value <<= shl_c0;
9255                 }
9256                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9257         }
9258 }
9259
9260 /**
9261  * Add vport metadata Reg C0 item to matcher
9262  *
9263  * @param[in, out] matcher
9264  *   Flow matcher.
9265  * @param[in, out] key
9266  *   Flow matcher value.
9267  * @param[in] value
9268  *   Register value to match.
9269  * @param[in] mask
9270  *   Register mask.
9269  */
9270 static void
9271 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9272                                   uint32_t value, uint32_t mask)
9273 {
9274         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9275 }
9276
9277 /**
9278  * Add tag item to matcher
9279  *
9280  * @param[in] dev
9281  *   The device to configure through.
9282  * @param[in, out] matcher
9283  *   Flow matcher.
9284  * @param[in, out] key
9285  *   Flow matcher value.
9286  * @param[in] item
9287  *   Flow pattern to translate.
9288  */
9289 static void
9290 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9291                                 void *matcher, void *key,
9292                                 const struct rte_flow_item *item)
9293 {
9294         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9295         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9296         uint32_t mask, value;
9297
9298         MLX5_ASSERT(tag_v);
9299         value = tag_v->data;
9300         mask = tag_m ? tag_m->data : UINT32_MAX;
9301         if (tag_v->id == REG_C_0) {
9302                 struct mlx5_priv *priv = dev->data->dev_private;
9303                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9304                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9305
9306                 mask &= msk_c0;
9307                 mask <<= shl_c0;
9308                 value <<= shl_c0;
9309         }
9310         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9311 }
9312
9313 /**
9314  * Add TAG item to matcher
9315  *
9316  * @param[in] dev
9317  *   The device to configure through.
9318  * @param[in, out] matcher
9319  *   Flow matcher.
9320  * @param[in, out] key
9321  *   Flow matcher value.
9322  * @param[in] item
9323  *   Flow pattern to translate.
9324  */
9325 static void
9326 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9327                            void *matcher, void *key,
9328                            const struct rte_flow_item *item)
9329 {
9330         const struct rte_flow_item_tag *tag_v = item->spec;
9331         const struct rte_flow_item_tag *tag_m = item->mask;
9332         enum modify_reg reg;
9333
9334         MLX5_ASSERT(tag_v);
9335         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9336         /* Get the metadata register index for the tag. */
9337         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9338         MLX5_ASSERT(reg > 0);
9339         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9340 }
9341
9342 /**
9343  * Add source vport match to the specified matcher.
9344  *
9345  * @param[in, out] matcher
9346  *   Flow matcher.
9347  * @param[in, out] key
9348  *   Flow matcher value.
9349  * @param[in] port
9350  *   Source vport value to match.
9351  * @param[in] mask
9352  *   Mask.
9353  */
9354 static void
9355 flow_dv_translate_item_source_vport(void *matcher, void *key,
9356                                     int16_t port, uint16_t mask)
9357 {
9358         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9359         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9360
9361         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9362         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9363 }
9364
9365 /**
9366  * Translate port-id item to eswitch match on port-id.
9367  *
9368  * @param[in] dev
9369  *   The device to configure through.
9370  * @param[in, out] matcher
9371  *   Flow matcher.
9372  * @param[in, out] key
9373  *   Flow matcher value.
9374  * @param[in] item
9375  *   Flow pattern to translate.
9376  * @param[in] attr
9377  *   Flow attributes.
9378  *
9379  * @return
9380  *   0 on success, a negative errno value otherwise.
9381  */
9382 static int
9383 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9384                                void *key, const struct rte_flow_item *item,
9385                                const struct rte_flow_attr *attr)
9386 {
9387         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9388         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9389         struct mlx5_priv *priv;
9390         uint16_t mask, id;
9391
9392         mask = pid_m ? pid_m->id : 0xffff;
9393         id = pid_v ? pid_v->id : dev->data->port_id;
9394         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9395         if (!priv)
9396                 return -rte_errno;
9397         /*
9398          * Translate to vport field or to metadata, depending on mode.
9399          * Kernel can use either misc.source_port or half of C0 metadata
9400          * register.
9401          */
9402         if (priv->vport_meta_mask) {
9403                 /*
9404                  * Provide the hint for SW steering library
9405                  * to insert the flow into ingress domain and
9406                  * save the extra vport match.
9407                  */
9408                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9409                     priv->pf_bond < 0 && attr->transfer)
9410                         flow_dv_translate_item_source_vport
9411                                 (matcher, key, priv->vport_id, mask);
9412                 /*
9413                  * We should always set the vport metadata register,
9414                  * otherwise the SW steering library can drop
9415                  * the rule if wire vport metadata value is not zero,
9416                  * it depends on kernel configuration.
9417                  */
9418                 flow_dv_translate_item_meta_vport(matcher, key,
9419                                                   priv->vport_meta_tag,
9420                                                   priv->vport_meta_mask);
9421         } else {
9422                 flow_dv_translate_item_source_vport(matcher, key,
9423                                                     priv->vport_id, mask);
9424         }
9425         return 0;
9426 }
9427
9428 /**
9429  * Add ICMP6 item to matcher and to the value.
9430  *
9431  * @param[in, out] matcher
9432  *   Flow matcher.
9433  * @param[in, out] key
9434  *   Flow matcher value.
9435  * @param[in] item
9436  *   Flow pattern to translate.
9437  * @param[in] inner
9438  *   Item is inner pattern.
9439  */
9440 static void
9441 flow_dv_translate_item_icmp6(void *matcher, void *key,
9442                               const struct rte_flow_item *item,
9443                               int inner)
9444 {
9445         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9446         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9447         void *headers_m;
9448         void *headers_v;
9449         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9450                                      misc_parameters_3);
9451         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9452         if (inner) {
9453                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9454                                          inner_headers);
9455                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9456         } else {
9457                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9458                                          outer_headers);
9459                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9460         }
9461         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9462         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9463         if (!icmp6_v)
9464                 return;
9465         if (!icmp6_m)
9466                 icmp6_m = &rte_flow_item_icmp6_mask;
9467         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9468         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9469                  icmp6_v->type & icmp6_m->type);
9470         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9471         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9472                  icmp6_v->code & icmp6_m->code);
9473 }
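
/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * could exercise this translator by matching ICMPv6 Echo Request packets
 * (type 128 per RFC 4443) with an item such as:
 *
 *   struct rte_flow_item_icmp6 icmp6_spec = { .type = 128, .code = 0 };
 *   struct rte_flow_item_icmp6 icmp6_mask = { .type = 0xff, .code = 0xff };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ICMP6,
 *           .spec = &icmp6_spec,
 *           .mask = &icmp6_mask,
 *   };
 */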
9474
9475 /**
9476  * Add ICMP item to matcher and to the value.
9477  *
9478  * @param[in, out] matcher
9479  *   Flow matcher.
9480  * @param[in, out] key
9481  *   Flow matcher value.
9482  * @param[in] item
9483  *   Flow pattern to translate.
9484  * @param[in] inner
9485  *   Item is inner pattern.
9486  */
9487 static void
9488 flow_dv_translate_item_icmp(void *matcher, void *key,
9489                             const struct rte_flow_item *item,
9490                             int inner)
9491 {
9492         const struct rte_flow_item_icmp *icmp_m = item->mask;
9493         const struct rte_flow_item_icmp *icmp_v = item->spec;
9494         uint32_t icmp_header_data_m = 0;
9495         uint32_t icmp_header_data_v = 0;
9496         void *headers_m;
9497         void *headers_v;
9498         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9499                                      misc_parameters_3);
9500         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9501         if (inner) {
9502                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9503                                          inner_headers);
9504                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9505         } else {
9506                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9507                                          outer_headers);
9508                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9509         }
9510         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9511         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9512         if (!icmp_v)
9513                 return;
9514         if (!icmp_m)
9515                 icmp_m = &rte_flow_item_icmp_mask;
9516         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9517                  icmp_m->hdr.icmp_type);
9518         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9519                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9520         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9521                  icmp_m->hdr.icmp_code);
9522         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9523                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9524         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9525         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9526         if (icmp_header_data_m) {
9527                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9528                 icmp_header_data_v |=
9529                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9530                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9531                          icmp_header_data_m);
9532                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9533                          icmp_header_data_v & icmp_header_data_m);
9534         }
9535 }
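
/*
 * Usage sketch (illustrative only): besides type and code, this translator
 * packs the ICMP identifier and sequence number into icmp_header_data, so
 * a rule can pin a single echo session, e.g. ident 0x1234 (an arbitrary
 * example value):
 *
 *   struct rte_flow_item_icmp icmp_spec = {
 *           .hdr = {
 *                   .icmp_type = RTE_IP_ICMP_ECHO_REQUEST,
 *                   .icmp_ident = RTE_BE16(0x1234),
 *           },
 *   };
 *   struct rte_flow_item_icmp icmp_mask = {
 *           .hdr = {
 *                   .icmp_type = 0xff,
 *                   .icmp_ident = RTE_BE16(0xffff),
 *           },
 *   };
 */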
9536
9537 /**
9538  * Add GTP item to matcher and to the value.
9539  *
9540  * @param[in, out] matcher
9541  *   Flow matcher.
9542  * @param[in, out] key
9543  *   Flow matcher value.
9544  * @param[in] item
9545  *   Flow pattern to translate.
9546  * @param[in] inner
9547  *   Item is inner pattern.
9548  */
9549 static void
9550 flow_dv_translate_item_gtp(void *matcher, void *key,
9551                            const struct rte_flow_item *item, int inner)
9552 {
9553         const struct rte_flow_item_gtp *gtp_m = item->mask;
9554         const struct rte_flow_item_gtp *gtp_v = item->spec;
9555         void *headers_m;
9556         void *headers_v;
9557         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9558                                      misc_parameters_3);
9559         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9560         uint16_t dport = RTE_GTPU_UDP_PORT;
9561
9562         if (inner) {
9563                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9564                                          inner_headers);
9565                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9566         } else {
9567                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9568                                          outer_headers);
9569                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9570         }
9571         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9572                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9573                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9574         }
9575         if (!gtp_v)
9576                 return;
9577         if (!gtp_m)
9578                 gtp_m = &rte_flow_item_gtp_mask;
9579         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9580                  gtp_m->v_pt_rsv_flags);
9581         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9582                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9583         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9584         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9585                  gtp_v->msg_type & gtp_m->msg_type);
9586         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9587                  rte_be_to_cpu_32(gtp_m->teid));
9588         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9589                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9590 }
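
/*
 * Usage sketch (illustrative only): selecting one GTP-U tunnel by TEID
 * (0x1234 is an arbitrary example value); when the pattern does not
 * constrain the UDP destination port, the translator above defaults it
 * to RTE_GTPU_UDP_PORT:
 *
 *   struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x1234) };
 *   struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_GTP,
 *           .spec = &gtp_spec,
 *           .mask = &gtp_mask,
 *   };
 */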
9591
9592 /**
9593  * Add GTP PSC item to matcher and to the value.
9594  *
9595  * @param[in, out] matcher
9596  *   Flow matcher.
9597  * @param[in, out] key
9598  *   Flow matcher value.
9599  * @param[in] item
9600  *   Flow pattern to translate.
9601  */
9602 static int
9603 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9604                                const struct rte_flow_item *item)
9605 {
9606         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9607         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9608         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9609                         misc_parameters_3);
9610         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9611         union {
9612                 uint32_t w32;
9613                 struct {
9614                         uint16_t seq_num;
9615                         uint8_t npdu_num;
9616                         uint8_t next_ext_header_type;
9617                 };
9618         } dw_2;
9619         uint8_t gtp_flags;
9620
9621         /* Always set E-flag match on one, regardless of GTP item settings. */
9622         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9623         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9624         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9625         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9626         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9627         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9628         /* Set next extension header type. */
9629         dw_2.seq_num = 0;
9630         dw_2.npdu_num = 0;
9631         dw_2.next_ext_header_type = 0xff;
9632         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9633                  rte_cpu_to_be_32(dw_2.w32));
9634         dw_2.seq_num = 0;
9635         dw_2.npdu_num = 0;
9636         dw_2.next_ext_header_type = 0x85;
9637         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9638                  rte_cpu_to_be_32(dw_2.w32));
9639         if (gtp_psc_v) {
9640                 union {
9641                         uint32_t w32;
9642                         struct {
9643                                 uint8_t len;
9644                                 uint8_t type_flags;
9645                                 uint8_t qfi;
9646                                 uint8_t reserved;
9647                         };
9648                 } dw_0;
9649
9650                 /* Set extension header PDU type and QoS. */
9651                 if (!gtp_psc_m)
9652                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9653                 dw_0.w32 = 0;
9654                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9655                 dw_0.qfi = gtp_psc_m->qfi;
9656                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9657                          rte_cpu_to_be_32(dw_0.w32));
9658                 dw_0.w32 = 0;
9659                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9660                                                         gtp_psc_m->pdu_type);
9661                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9662                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9663                          rte_cpu_to_be_32(dw_0.w32));
9664         }
9665         return 0;
9666 }
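
/*
 * Usage sketch (illustrative only): the translator above always matches
 * the E-flag and next-extension-header type 0x85 (PDU session container),
 * so the item itself only narrows the PDU type and QFI, e.g. downlink
 * PDUs (type 0) with QoS flow identifier 9 (QFI is a 6-bit field):
 *
 *   struct rte_flow_item_gtp_psc psc_spec = { .pdu_type = 0, .qfi = 9 };
 *   struct rte_flow_item_gtp_psc psc_mask = { .pdu_type = 0xff, .qfi = 0x3f };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
 *           .spec = &psc_spec,
 *           .mask = &psc_mask,
 *   };
 */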
9667
9668 /**
9669  * Add eCPRI item to matcher and to the value.
9670  *
9671  * @param[in] dev
9672  *   The device to configure through.
9673  * @param[in, out] matcher
9674  *   Flow matcher.
9675  * @param[in, out] key
9676  *   Flow matcher value.
9677  * @param[in] item
9678  *   Flow pattern to translate.
9681  */
9682 static void
9683 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9684                              void *key, const struct rte_flow_item *item)
9685 {
9686         struct mlx5_priv *priv = dev->data->dev_private;
9687         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9688         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9689         struct rte_ecpri_common_hdr common;
9690         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9691                                      misc_parameters_4);
9692         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9693         uint32_t *samples;
9694         void *dw_m;
9695         void *dw_v;
9696
9697         if (!ecpri_v)
9698                 return;
9699         if (!ecpri_m)
9700                 ecpri_m = &rte_flow_item_ecpri_mask;
9701         /*
9702          * At most four DW samples are supported in a single matching now.
9703          * Two of them are used for eCPRI matching:
9704          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
9705          * 2. ID of a message: one or two bytes, mask 0xffff0000 or
9706          *    0xff000000, if any.
9707          */
9708         if (!ecpri_m->hdr.common.u32)
9709                 return;
9710         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9711         /* Need to take the whole DW as the mask to fill the entry. */
9712         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9713                             prog_sample_field_value_0);
9714         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9715                             prog_sample_field_value_0);
9716         /* Already big endian (network order) in the header. */
9717         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9718         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9719         /* Sample#0, used for matching type, offset 0. */
9720         MLX5_SET(fte_match_set_misc4, misc4_m,
9721                  prog_sample_field_id_0, samples[0]);
9722         /* It makes no sense to set the sample ID in the mask field. */
9723         MLX5_SET(fte_match_set_misc4, misc4_v,
9724                  prog_sample_field_id_0, samples[0]);
9725         /*
9726          * Checking if message body part needs to be matched.
9727          * Some wildcard rules only matching type field should be supported.
9728          */
9729         if (ecpri_m->hdr.dummy[0]) {
9730                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9731                 switch (common.type) {
9732                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9733                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9734                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9735                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9736                                             prog_sample_field_value_1);
9737                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9738                                             prog_sample_field_value_1);
9739                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9740                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9741                                             ecpri_m->hdr.dummy[0];
9742                         /* Sample#1, to match message body, offset 4. */
9743                         MLX5_SET(fte_match_set_misc4, misc4_m,
9744                                  prog_sample_field_id_1, samples[1]);
9745                         MLX5_SET(fte_match_set_misc4, misc4_v,
9746                                  prog_sample_field_id_1, samples[1]);
9747                         break;
9748                 default:
9749                         /* Others, do not match any sample ID. */
9750                         break;
9751                 }
9752         }
9753 }
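
/*
 * Usage sketch (illustrative only): matching eCPRI IQ data messages
 * (type #0) of one physical channel (0x1 is an arbitrary example PC ID).
 * The type byte lands in flex-parser sample #0 and the first body dword
 * in sample #1, as translated above:
 *
 *   struct rte_flow_item_ecpri ecpri_spec = {
 *           .hdr = {
 *                   .common = { .type = RTE_ECPRI_MSG_TYPE_IQ_DATA },
 *                   .type0 = { .pc_id = RTE_BE16(0x1) },
 *           },
 *   };
 *   struct rte_flow_item_ecpri ecpri_mask = {
 *           .hdr = {
 *                   .common = { .type = 0xff },
 *                   .type0 = { .pc_id = RTE_BE16(0xffff) },
 *           },
 *   };
 */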
9754
9755 /**
9756  * Add connection tracking status item to matcher and to the value.
9757  *
9758  * @param[in] dev
9759  *   The device to configure through.
9760  * @param[in, out] matcher
9761  *   Flow matcher.
9762  * @param[in, out] key
9763  *   Flow matcher value.
9764  * @param[in] item
9765  *   Flow pattern to translate.
9766  */
9767 static void
9768 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
9769                               void *matcher, void *key,
9770                               const struct rte_flow_item *item)
9771 {
9772         uint32_t reg_value = 0;
9773         int reg_id;
9774         /* Lower 8 bits are 0b11/0000/11, the middle 4 bits are reserved. */
9775         uint32_t reg_mask = 0;
9776         const struct rte_flow_item_conntrack *spec = item->spec;
9777         const struct rte_flow_item_conntrack *mask = item->mask;
9778         uint32_t flags;
9779         struct rte_flow_error error;
9780
9781         if (!mask)
9782                 mask = &rte_flow_item_conntrack_mask;
9783         if (!spec || !mask->flags)
9784                 return;
9785         flags = spec->flags & mask->flags;
9786         /* The conflict should be checked in the validation. */
9787         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
9788                 reg_value |= MLX5_CT_SYNDROME_VALID;
9789         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9790                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
9791         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
9792                 reg_value |= MLX5_CT_SYNDROME_INVALID;
9793         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
9794                 reg_value |= MLX5_CT_SYNDROME_TRAP;
9795         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9796                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
9797         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
9798                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
9799                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
9800                 reg_mask |= 0xc0;
9801         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9802                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
9803         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9804                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
9805         /* The REG_C_x value could be saved during startup. */
9806         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
9807         if (reg_id == REG_NON)
9808                 return;
9809         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
9810                                reg_value, reg_mask);
9811 }
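
/*
 * Usage sketch (illustrative only, assuming a conntrack context was
 * already attached with the CONNTRACK action): steering only packets the
 * ASO connection tracker reported as valid:
 *
 *   struct rte_flow_item_conntrack ct_spec = {
 *           .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *   };
 *   struct rte_flow_item_conntrack ct_mask = {
 *           .flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
 *           .spec = &ct_spec,
 *           .mask = &ct_mask,
 *   };
 */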
9812
9813 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9814
9815 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9816         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9817                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9818
9819 /**
9820  * Calculate flow matcher enable bitmap.
9821  *
9822  * @param match_criteria
9823  *   Pointer to flow matcher criteria.
9824  *
9825  * @return
9826  *   Bitmap of enabled fields.
9827  */
9828 static uint8_t
9829 flow_dv_matcher_enable(uint32_t *match_criteria)
9830 {
9831         uint8_t match_criteria_enable;
9832
9833         match_criteria_enable =
9834                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9835                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9836         match_criteria_enable |=
9837                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9838                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9839         match_criteria_enable |=
9840                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9841                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9842         match_criteria_enable |=
9843                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9844                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9845         match_criteria_enable |=
9846                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9847                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9848         match_criteria_enable |=
9849                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9850                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9851         return match_criteria_enable;
9852 }
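
/*
 * Worked example (assuming the bit order encoded in the
 * MLX5_MATCH_CRITERIA_ENABLE_*_BIT macros: outer=0, misc=1, inner=2,
 * misc2=3, misc3=4, misc4=5): a matcher that masks only outer headers
 * plus ICMP fields in misc_parameters_3 yields an enable bitmap of
 * (1 << 0) | (1 << 4) = 0x11.
 */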
9853
9854 struct mlx5_hlist_entry *
9855 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
9856 {
9857         struct mlx5_dev_ctx_shared *sh = list->ctx;
9858         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9859         struct rte_eth_dev *dev = ctx->dev;
9860         struct mlx5_flow_tbl_data_entry *tbl_data;
9861         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
9862         struct rte_flow_error *error = ctx->error;
9863         union mlx5_flow_tbl_key key = { .v64 = key64 };
9864         struct mlx5_flow_tbl_resource *tbl;
9865         void *domain;
9866         uint32_t idx = 0;
9867         int ret;
9868
9869         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
9870         if (!tbl_data) {
9871                 rte_flow_error_set(error, ENOMEM,
9872                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9873                                    NULL,
9874                                    "cannot allocate flow table data entry");
9875                 return NULL;
9876         }
9877         tbl_data->idx = idx;
9878         tbl_data->tunnel = tt_prm->tunnel;
9879         tbl_data->group_id = tt_prm->group_id;
9880         tbl_data->external = !!tt_prm->external;
9881         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
9882         tbl_data->is_egress = !!key.is_egress;
9883         tbl_data->is_transfer = !!key.is_fdb;
9884         tbl_data->dummy = !!key.dummy;
9885         tbl_data->level = key.level;
9886         tbl_data->id = key.id;
9887         tbl = &tbl_data->tbl;
9888         if (key.dummy)
9889                 return &tbl_data->entry;
9890         if (key.is_fdb)
9891                 domain = sh->fdb_domain;
9892         else if (key.is_egress)
9893                 domain = sh->tx_domain;
9894         else
9895                 domain = sh->rx_domain;
9896         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
9897         if (ret) {
9898                 rte_flow_error_set(error, ENOMEM,
9899                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9900                                    NULL, "cannot create flow table object");
9901                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9902                 return NULL;
9903         }
9904         if (key.level != 0) {
9905                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9906                                         (tbl->obj, &tbl_data->jump.action);
9907                 if (ret) {
9908                         rte_flow_error_set(error, ENOMEM,
9909                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9910                                            NULL,
9911                                            "cannot create flow jump action");
9912                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
9913                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9914                         return NULL;
9915                 }
9916         }
9917         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
9918               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
9919               key.level, key.id);
9920         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
9921                              flow_dv_matcher_create_cb,
9922                              flow_dv_matcher_match_cb,
9923                              flow_dv_matcher_remove_cb);
9924         return &tbl_data->entry;
9925 }
9926
9927 int
9928 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9929                      struct mlx5_hlist_entry *entry, uint64_t key64,
9930                      void *cb_ctx __rte_unused)
9931 {
9932         struct mlx5_flow_tbl_data_entry *tbl_data =
9933                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9934         union mlx5_flow_tbl_key key = { .v64 = key64 };
9935
9936         return tbl_data->level != key.level ||
9937                tbl_data->id != key.id ||
9938                tbl_data->dummy != key.dummy ||
9939                tbl_data->is_transfer != !!key.is_fdb ||
9940                tbl_data->is_egress != !!key.is_egress;
9941 }
9942
9943 /**
9944  * Get a flow table.
9945  *
9946  * @param[in, out] dev
9947  *   Pointer to rte_eth_dev structure.
9948  * @param[in] table_level
9949  *   Table level to use.
9950  * @param[in] egress
9951  *   Direction of the table.
9952  * @param[in] transfer
9953  *   E-Switch or NIC flow.
9954  * @param[in] dummy
9955  *   Dummy entry for dv API.
9956  * @param[in] table_id
9957  *   Table id to use.
9958  * @param[out] error
9959  *   pointer to error structure.
9960  *
9961  * @return
9962  *   Returns the table resource on success, NULL in case of failure.
9963  */
9964 struct mlx5_flow_tbl_resource *
9965 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
9966                          uint32_t table_level, uint8_t egress,
9967                          uint8_t transfer,
9968                          bool external,
9969                          const struct mlx5_flow_tunnel *tunnel,
9970                          uint32_t group_id, uint8_t dummy,
9971                          uint32_t table_id,
9972                          struct rte_flow_error *error)
9973 {
9974         struct mlx5_priv *priv = dev->data->dev_private;
9975         union mlx5_flow_tbl_key table_key = {
9976                 {
9977                         .level = table_level,
9978                         .id = table_id,
9979                         .reserved = 0,
9980                         .dummy = !!dummy,
9981                         .is_fdb = !!transfer,
9982                         .is_egress = !!egress,
9983                 }
9984         };
9985         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
9986                 .tunnel = tunnel,
9987                 .group_id = group_id,
9988                 .external = external,
9989         };
9990         struct mlx5_flow_cb_ctx ctx = {
9991                 .dev = dev,
9992                 .error = error,
9993                 .data = &tt_prm,
9994         };
9995         struct mlx5_hlist_entry *entry;
9996         struct mlx5_flow_tbl_data_entry *tbl_data;
9997
9998         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
9999         if (!entry) {
10000                 rte_flow_error_set(error, ENOMEM,
10001                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10002                                    "cannot get table");
10003                 return NULL;
10004         }
10005         DRV_LOG(DEBUG, "table_level %u table_id %u "
10006                 "tunnel %u group %u registered.",
10007                 table_level, table_id,
10008                 tunnel ? tunnel->tunnel_id : 0, group_id);
10009         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10010         return &tbl_data->tbl;
10011 }
10012
10013 void
10014 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
10015                       struct mlx5_hlist_entry *entry)
10016 {
10017         struct mlx5_dev_ctx_shared *sh = list->ctx;
10018         struct mlx5_flow_tbl_data_entry *tbl_data =
10019                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10020
10021         MLX5_ASSERT(entry && sh);
10022         if (tbl_data->jump.action)
10023                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10024         if (tbl_data->tbl.obj)
10025                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10026         if (tbl_data->tunnel_offload && tbl_data->external) {
10027                 struct mlx5_hlist_entry *he;
10028                 struct mlx5_hlist *tunnel_grp_hash;
10029                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10030                 union tunnel_tbl_key tunnel_key = {
10031                         .tunnel_id = tbl_data->tunnel ?
10032                                         tbl_data->tunnel->tunnel_id : 0,
10033                         .group = tbl_data->group_id
10034                 };
10035                 uint32_t table_level = tbl_data->level;
10036
10037                 tunnel_grp_hash = tbl_data->tunnel ?
10038                                         tbl_data->tunnel->groups :
10039                                         thub->groups;
10040                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
10041                 if (he)
10042                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10043                 DRV_LOG(DEBUG,
10044                         "table_level %u id %u tunnel %u group %u released.",
10045                         table_level,
10046                         tbl_data->id,
10047                         tbl_data->tunnel ?
10048                         tbl_data->tunnel->tunnel_id : 0,
10049                         tbl_data->group_id);
10050         }
10051         mlx5_cache_list_destroy(&tbl_data->matchers);
10052         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10053 }
10054
10055 /**
10056  * Release a flow table.
10057  *
10058  * @param[in] sh
10059  *   Pointer to device shared structure.
10060  * @param[in] tbl
10061  *   Table resource to be released.
10062  *
10063  * @return
10064  *   Returns 0 if the table was released, 1 otherwise.
10065  */
10066 static int
10067 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10068                              struct mlx5_flow_tbl_resource *tbl)
10069 {
10070         struct mlx5_flow_tbl_data_entry *tbl_data =
10071                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10072
10073         if (!tbl)
10074                 return 0;
10075         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10076 }
10077
10078 int
10079 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
10080                          struct mlx5_cache_entry *entry, void *cb_ctx)
10081 {
10082         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10083         struct mlx5_flow_dv_matcher *ref = ctx->data;
10084         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10085                                                         entry);
10086
10087         return cur->crc != ref->crc ||
10088                cur->priority != ref->priority ||
10089                memcmp((const void *)cur->mask.buf,
10090                       (const void *)ref->mask.buf, ref->mask.size);
10091 }
10092
10093 struct mlx5_cache_entry *
10094 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
10095                           struct mlx5_cache_entry *entry __rte_unused,
10096                           void *cb_ctx)
10097 {
10098         struct mlx5_dev_ctx_shared *sh = list->ctx;
10099         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10100         struct mlx5_flow_dv_matcher *ref = ctx->data;
10101         struct mlx5_flow_dv_matcher *cache;
10102         struct mlx5dv_flow_matcher_attr dv_attr = {
10103                 .type = IBV_FLOW_ATTR_NORMAL,
10104                 .match_mask = (void *)&ref->mask,
10105         };
10106         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10107                                                             typeof(*tbl), tbl);
10108         int ret;
10109
10110         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
10111         if (!cache) {
10112                 rte_flow_error_set(ctx->error, ENOMEM,
10113                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10114                                    "cannot create matcher");
10115                 return NULL;
10116         }
10117         *cache = *ref;
10118         dv_attr.match_criteria_enable =
10119                 flow_dv_matcher_enable(cache->mask.buf);
10120         dv_attr.priority = ref->priority;
10121         if (tbl->is_egress)
10122                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10123         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10124                                                &cache->matcher_object);
10125         if (ret) {
10126                 mlx5_free(cache);
10127                 rte_flow_error_set(ctx->error, ENOMEM,
10128                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10129                                    "cannot create matcher");
10130                 return NULL;
10131         }
10132         return &cache->entry;
10133 }
10134
10135 /**
10136  * Register the flow matcher.
10137  *
10138  * @param[in, out] dev
10139  *   Pointer to rte_eth_dev structure.
10140  * @param[in, out] matcher
10141  *   Pointer to flow matcher.
10142  * @param[in, out] key
10143  *   Pointer to flow table key.
10144  * @param[in, out] dev_flow
10145  *   Pointer to the dev_flow.
10146  * @param[out] error
10147  *   pointer to error structure.
10148  *
10149  * @return
10150  *   0 on success, a negative errno value otherwise and rte_errno is set.
10151  */
10152 static int
10153 flow_dv_matcher_register(struct rte_eth_dev *dev,
10154                          struct mlx5_flow_dv_matcher *ref,
10155                          union mlx5_flow_tbl_key *key,
10156                          struct mlx5_flow *dev_flow,
10157                          const struct mlx5_flow_tunnel *tunnel,
10158                          uint32_t group_id,
10159                          struct rte_flow_error *error)
10160 {
10161         struct mlx5_cache_entry *entry;
10162         struct mlx5_flow_dv_matcher *cache;
10163         struct mlx5_flow_tbl_resource *tbl;
10164         struct mlx5_flow_tbl_data_entry *tbl_data;
10165         struct mlx5_flow_cb_ctx ctx = {
10166                 .error = error,
10167                 .data = ref,
10168         };
10169
10170         /*
10171          * Tunnel offload API requires this registration for cases when
10172          * a tunnel match rule was inserted before the tunnel set rule.
10173          */
10174         tbl = flow_dv_tbl_resource_get(dev, key->level,
10175                                        key->is_egress, key->is_fdb,
10176                                        dev_flow->external, tunnel,
10177                                        group_id, 0, key->id, error);
10178         if (!tbl)
10179                 return -rte_errno;      /* No need to refill the error info */
10180         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10181         ref->tbl = tbl;
10182         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
10183         if (!entry) {
10184                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10185                 return rte_flow_error_set(error, ENOMEM,
10186                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10187                                           "cannot allocate ref memory");
10188         }
10189         cache = container_of(entry, typeof(*cache), entry);
10190         dev_flow->handle->dvh.matcher = cache;
10191         return 0;
10192 }
10193
10194 struct mlx5_hlist_entry *
10195 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
10196 {
10197         struct mlx5_dev_ctx_shared *sh = list->ctx;
10198         struct rte_flow_error *error = ctx;
10199         struct mlx5_flow_dv_tag_resource *entry;
10200         uint32_t idx = 0;
10201         int ret;
10202
10203         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10204         if (!entry) {
10205                 rte_flow_error_set(error, ENOMEM,
10206                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10207                                    "cannot allocate resource memory");
10208                 return NULL;
10209         }
10210         entry->idx = idx;
10211         entry->tag_id = key;
10212         ret = mlx5_flow_os_create_flow_action_tag(key,
10213                                                   &entry->action);
10214         if (ret) {
10215                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10216                 rte_flow_error_set(error, ENOMEM,
10217                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10218                                    NULL, "cannot create action");
10219                 return NULL;
10220         }
10221         return &entry->entry;
10222 }
10223
10224 int
10225 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
10226                      struct mlx5_hlist_entry *entry, uint64_t key,
10227                      void *cb_ctx __rte_unused)
10228 {
10229         struct mlx5_flow_dv_tag_resource *tag =
10230                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10231
10232         return key != tag->tag_id;
10233 }
10234
10235 /**
10236  * Find existing tag resource or create and register a new one.
10237  *
10238  * @param[in, out] dev
10239  *   Pointer to rte_eth_dev structure.
10240  * @param[in, out] tag_be24
10241  *   Tag value in big endian, right-shifted by 8 bits.
10242  * @param[in, out] dev_flow
10243  *   Pointer to the dev_flow.
10244  * @param[out] error
10245  *   pointer to error structure.
10246  *
10247  * @return
10248  *   0 on success, a negative errno value otherwise and rte_errno is set.
10249  */
10250 static int
10251 flow_dv_tag_resource_register
10252                         (struct rte_eth_dev *dev,
10253                          uint32_t tag_be24,
10254                          struct mlx5_flow *dev_flow,
10255                          struct rte_flow_error *error)
10256 {
10257         struct mlx5_priv *priv = dev->data->dev_private;
10258         struct mlx5_flow_dv_tag_resource *cache_resource;
10259         struct mlx5_hlist_entry *entry;
10260
10261         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
10262         if (entry) {
10263                 cache_resource = container_of
10264                         (entry, struct mlx5_flow_dv_tag_resource, entry);
10265                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
10266                 dev_flow->dv.tag_resource = cache_resource;
10267                 return 0;
10268         }
10269         return -rte_errno;
10270 }
10271
10272 void
10273 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
10274                       struct mlx5_hlist_entry *entry)
10275 {
10276         struct mlx5_dev_ctx_shared *sh = list->ctx;
10277         struct mlx5_flow_dv_tag_resource *tag =
10278                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10279
10280         MLX5_ASSERT(tag && sh && tag->action);
10281         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10282         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10283         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10284 }
10285
10286 /**
10287  * Release the tag.
10288  *
10289  * @param dev
10290  *   Pointer to Ethernet device.
10291  * @param tag_idx
10292  *   Tag index.
10293  *
10294  * @return
10295  *   1 while a reference on it exists, 0 when freed.
10296  */
10297 static int
10298 flow_dv_tag_release(struct rte_eth_dev *dev,
10299                     uint32_t tag_idx)
10300 {
10301         struct mlx5_priv *priv = dev->data->dev_private;
10302         struct mlx5_flow_dv_tag_resource *tag;
10303
10304         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10305         if (!tag)
10306                 return 0;
10307         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10308                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10309         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10310 }
10311
10312 /**
10313  * Translate port ID action to vport.
10314  *
10315  * @param[in] dev
10316  *   Pointer to rte_eth_dev structure.
10317  * @param[in] action
10318  *   Pointer to the port ID action.
10319  * @param[out] dst_port_id
10320  *   The target port ID.
10321  * @param[out] error
10322  *   Pointer to the error structure.
10323  *
10324  * @return
10325  *   0 on success, a negative errno value otherwise and rte_errno is set.
10326  */
10327 static int
10328 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10329                                  const struct rte_flow_action *action,
10330                                  uint32_t *dst_port_id,
10331                                  struct rte_flow_error *error)
10332 {
10333         uint32_t port;
10334         struct mlx5_priv *priv;
10335         const struct rte_flow_action_port_id *conf =
10336                         (const struct rte_flow_action_port_id *)action->conf;
10337
10338         port = conf->original ? dev->data->port_id : conf->id;
10339         priv = mlx5_port_to_eswitch_info(port, false);
10340         if (!priv)
10341                 return rte_flow_error_set(error, -rte_errno,
10342                                           RTE_FLOW_ERROR_TYPE_ACTION,
10343                                           NULL,
10344                                           "No eswitch info was found for port");
10345 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
10346         /*
10347          * This parameter is transferred to
10348          * mlx5dv_dr_action_create_dest_ib_port().
10349          */
10350         *dst_port_id = priv->dev_port;
10351 #else
10352         /*
10353          * Legacy mode, LAG configurations are not supported.
10354          * This parameter is transferred to
10355          * mlx5dv_dr_action_create_dest_vport().
10356          */
10357         *dst_port_id = priv->vport_id;
10358 #endif
10359         return 0;
10360 }
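
/*
 * Usage sketch (illustrative only): a transfer rule forwarding matched
 * packets to the representor exposed as ethdev port 1; the helper above
 * resolves that ID to the underlying IB dev_port or vport:
 *
 *   struct rte_flow_action_port_id pid_conf = {
 *           .original = 0,
 *           .id = 1,
 *   };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *           .conf = &pid_conf,
 *   };
 */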
10361
10362 /**
10363  * Create a counter with aging configuration.
10364  *
10365  * @param[in] dev
10366  *   Pointer to rte_eth_dev structure.
10367  * @param[in] dev_flow
10368  *   Pointer to the mlx5_flow.
10369  * @param[out] count
10370  *   Pointer to the counter action configuration.
10371  * @param[in] age
10372  *   Pointer to the aging action configuration.
10373  *
10374  * @return
10375  *   Index to flow counter on success, 0 otherwise.
10376  */
10377 static uint32_t
10378 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10379                                 struct mlx5_flow *dev_flow,
10380                                 const struct rte_flow_action_count *count,
10381                                 const struct rte_flow_action_age *age)
10382 {
10383         uint32_t counter;
10384         struct mlx5_age_param *age_param;
10385
10386         if (count && count->shared)
10387                 counter = flow_dv_counter_get_shared(dev, count->id);
10388         else
10389                 counter = flow_dv_counter_alloc(dev, !!age);
10390         if (!counter || age == NULL)
10391                 return counter;
10392         age_param = flow_dv_counter_idx_get_age(dev, counter);
10393         age_param->context = age->context ? age->context :
10394                 (void *)(uintptr_t)(dev_flow->flow_idx);
10395         age_param->timeout = age->timeout;
10396         age_param->port_id = dev->data->port_id;
10397         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10398         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10399         return counter;
10400 }
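
/*
 * Usage sketch (illustrative only): pairing a counter with aging so the
 * flow is reported idle after 10 seconds without traffic; with a NULL
 * context the helper above falls back to the flow index:
 *
 *   struct rte_flow_action_count count_conf = { .shared = 0, .id = 0 };
 *   struct rte_flow_action_age age_conf = {
 *           .timeout = 10,
 *           .context = NULL,
 *   };
 */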
10401
10402 /**
10403  * Add Tx queue matcher.
10404  *
10405  * @param[in] dev
10406  *   Pointer to the dev struct.
10407  * @param[in, out] matcher
10408  *   Flow matcher.
10409  * @param[in, out] key
10410  *   Flow matcher value.
10411  * @param[in] item
10412  *   Flow pattern to translate.
10415  */
10416 static void
10417 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10418                                 void *matcher, void *key,
10419                                 const struct rte_flow_item *item)
10420 {
10421         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10422         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10423         void *misc_m =
10424                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10425         void *misc_v =
10426                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10427         struct mlx5_txq_ctrl *txq;
10428         uint32_t queue;
10429
10430
10431         queue_m = (const void *)item->mask;
10432         if (!queue_m)
10433                 return;
10434         queue_v = (const void *)item->spec;
10435         if (!queue_v)
10436                 return;
10437         txq = mlx5_txq_get(dev, queue_v->queue);
10438         if (!txq)
10439                 return;
10440         queue = txq->obj->sq->id;
10441         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10442         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10443                  queue & queue_m->queue);
10444         mlx5_txq_release(dev, queue_v->queue);
10445 }
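
/*
 * Internal usage sketch (illustrative only): this item is PMD-private
 * and is normally inserted by the driver itself; the Tx queue index in
 * the item is remapped to the hardware SQ number above:
 *
 *   struct mlx5_rte_flow_item_tx_queue queue_spec = { .queue = 3 };
 *   struct mlx5_rte_flow_item_tx_queue queue_mask = { .queue = UINT32_MAX };
 *   struct rte_flow_item item = {
 *           .type = (enum rte_flow_item_type)
 *                   MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
 *           .spec = &queue_spec,
 *           .mask = &queue_mask,
 *   };
 */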
10446
10447 /**
10448  * Set the hash fields according to the @p flow information.
10449  *
10450  * @param[in] dev_flow
10451  *   Pointer to the mlx5_flow.
10452  * @param[in] rss_desc
10453  *   Pointer to the mlx5_flow_rss_desc.
10454  */
10455 static void
10456 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10457                        struct mlx5_flow_rss_desc *rss_desc)
10458 {
10459         uint64_t items = dev_flow->handle->layers;
10460         int rss_inner = 0;
10461         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10462
10463         dev_flow->hash_fields = 0;
10464 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10465         if (rss_desc->level >= 2) {
10466                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10467                 rss_inner = 1;
10468         }
10469 #endif
10470         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10471             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10472                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10473                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10474                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10475                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10476                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10477                         else
10478                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10479                 }
10480         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10481                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10482                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10483                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10484                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10485                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10486                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10487                         else
10488                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10489                 }
10490         }
10491         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10492             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10493                 if (rss_types & ETH_RSS_UDP) {
10494                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10495                                 dev_flow->hash_fields |=
10496                                                 IBV_RX_HASH_SRC_PORT_UDP;
10497                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10498                                 dev_flow->hash_fields |=
10499                                                 IBV_RX_HASH_DST_PORT_UDP;
10500                         else
10501                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10502                 }
10503         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10504                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10505                 if (rss_types & ETH_RSS_TCP) {
10506                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10507                                 dev_flow->hash_fields |=
10508                                                 IBV_RX_HASH_SRC_PORT_TCP;
10509                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10510                                 dev_flow->hash_fields |=
10511                                                 IBV_RX_HASH_DST_PORT_TCP;
10512                         else
10513                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10514                 }
10515         }
10516 }
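
/*
 * Worked example: for an outer IPv4/UDP flow with
 * rss_desc->types = ETH_RSS_IPV4 | ETH_RSS_UDP | ETH_RSS_L3_SRC_ONLY,
 * the logic above selects IBV_RX_HASH_SRC_IPV4 for the L3 part and the
 * full MLX5_UDP_IBV_RX_HASH (source and destination ports) for the L4
 * part, since no ETH_RSS_L4_*_ONLY flag narrows it.
 */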
10517
10518 /**
10519  * Prepare an Rx Hash queue.
10520  *
10521  * @param dev
10522  *   Pointer to Ethernet device.
10523  * @param[in] dev_flow
10524  *   Pointer to the mlx5_flow.
10525  * @param[in] rss_desc
10526  *   Pointer to the mlx5_flow_rss_desc.
10527  * @param[out] hrxq_idx
10528  *   Hash Rx queue index.
10529  *
10530  * @return
10531  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10532  */
10533 static struct mlx5_hrxq *
10534 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10535                      struct mlx5_flow *dev_flow,
10536                      struct mlx5_flow_rss_desc *rss_desc,
10537                      uint32_t *hrxq_idx)
10538 {
10539         struct mlx5_priv *priv = dev->data->dev_private;
10540         struct mlx5_flow_handle *dh = dev_flow->handle;
10541         struct mlx5_hrxq *hrxq;
10542
10543         MLX5_ASSERT(rss_desc->queue_num);
10544         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10545         rss_desc->hash_fields = dev_flow->hash_fields;
10546         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10547         rss_desc->shared_rss = 0;
10548         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10549         if (!*hrxq_idx)
10550                 return NULL;
10551         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10552                               *hrxq_idx);
10553         return hrxq;
10554 }
10555
10556 /**
10557  * Release sample sub action resource.
10558  *
10559  * @param[in, out] dev
10560  *   Pointer to rte_eth_dev structure.
10561  * @param[in] act_res
10562  *   Pointer to sample sub action resource.
10563  */
10564 static void
10565 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10566                                    struct mlx5_flow_sub_actions_idx *act_res)
10567 {
10568         if (act_res->rix_hrxq) {
10569                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10570                 act_res->rix_hrxq = 0;
10571         }
10572         if (act_res->rix_encap_decap) {
10573                 flow_dv_encap_decap_resource_release(dev,
10574                                                      act_res->rix_encap_decap);
10575                 act_res->rix_encap_decap = 0;
10576         }
10577         if (act_res->rix_port_id_action) {
10578                 flow_dv_port_id_action_resource_release(dev,
10579                                                 act_res->rix_port_id_action);
10580                 act_res->rix_port_id_action = 0;
10581         }
10582         if (act_res->rix_tag) {
10583                 flow_dv_tag_release(dev, act_res->rix_tag);
10584                 act_res->rix_tag = 0;
10585         }
10586         if (act_res->rix_jump) {
10587                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10588                 act_res->rix_jump = 0;
10589         }
10590 }
10591
10592 int
10593 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10594                         struct mlx5_cache_entry *entry, void *cb_ctx)
10595 {
10596         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10597         struct rte_eth_dev *dev = ctx->dev;
10598         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10599         struct mlx5_flow_dv_sample_resource *cache_resource =
10600                         container_of(entry, typeof(*cache_resource), entry);
10601
10602         if (resource->ratio == cache_resource->ratio &&
10603             resource->ft_type == cache_resource->ft_type &&
10604             resource->ft_id == cache_resource->ft_id &&
10605             resource->set_action == cache_resource->set_action &&
10606             !memcmp((void *)&resource->sample_act,
10607                     (void *)&cache_resource->sample_act,
10608                     sizeof(struct mlx5_flow_sub_actions_list))) {
10609                 /*
10610                  * Existing sample action should release the prepared
10611                  * sub-actions reference counter.
10612                  */
10613                 flow_dv_sample_sub_actions_release(dev,
10614                                                 &resource->sample_idx);
10615                 return 0;
10616         }
10617         return 1;
10618 }
10619
10620 struct mlx5_cache_entry *
10621 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
10622                          struct mlx5_cache_entry *entry __rte_unused,
10623                          void *cb_ctx)
10624 {
10625         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10626         struct rte_eth_dev *dev = ctx->dev;
10627         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10628         void **sample_dv_actions = resource->sub_actions;
10629         struct mlx5_flow_dv_sample_resource *cache_resource;
10630         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10631         struct mlx5_priv *priv = dev->data->dev_private;
10632         struct mlx5_dev_ctx_shared *sh = priv->sh;
10633         struct mlx5_flow_tbl_resource *tbl;
10634         uint32_t idx = 0;
10635         const uint32_t next_ft_step = 1;
10636         uint32_t next_ft_id = resource->ft_id + next_ft_step;
10637         uint8_t is_egress = 0;
10638         uint8_t is_transfer = 0;
10639         struct rte_flow_error *error = ctx->error;
10640
10641         /* Register new sample resource. */
10642         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10643         if (!cache_resource) {
10644                 rte_flow_error_set(error, ENOMEM,
10645                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10646                                           NULL,
10647                                           "cannot allocate resource memory");
10648                 return NULL;
10649         }
10650         *cache_resource = *resource;
10651         /* Create normal path table level */
10652         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10653                 is_transfer = 1;
10654         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10655                 is_egress = 1;
10656         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10657                                         is_egress, is_transfer,
10658                                         true, NULL, 0, 0, 0, error);
10659         if (!tbl) {
10660                 rte_flow_error_set(error, ENOMEM,
10661                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10662                                           NULL,
10663                                           "fail to create normal path table "
10664                                           "for sample");
10665                 goto error;
10666         }
10667         cache_resource->normal_path_tbl = tbl;
10668         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10669                 if (!sh->default_miss_action) {
10670                         rte_flow_error_set(error, ENOMEM,
10671                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10672                                                 NULL,
10673                                                 "default miss action was not "
10674                                                 "created");
10675                         goto error;
10676                 }
10677                 sample_dv_actions[resource->sample_act.actions_num++] =
10678                                                 sh->default_miss_action;
10679         }
10680         /* Create a DR sample action */
10681         sampler_attr.sample_ratio = cache_resource->ratio;
10682         sampler_attr.default_next_table = tbl->obj;
10683         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
10684         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
10685                                                         &sample_dv_actions[0];
10686         sampler_attr.action = cache_resource->set_action;
10687         if (mlx5_os_flow_dr_create_flow_action_sampler
10688                         (&sampler_attr, &cache_resource->verbs_action)) {
10689                 rte_flow_error_set(error, ENOMEM,
10690                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10691                                         NULL, "cannot create sample action");
10692                 goto error;
10693         }
10694         cache_resource->idx = idx;
10695         cache_resource->dev = dev;
10696         return &cache_resource->entry;
10697 error:
10698         if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
10699                 flow_dv_sample_sub_actions_release(dev,
10700                                                    &cache_resource->sample_idx);
10701         if (cache_resource->normal_path_tbl)
10702                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10703                                 cache_resource->normal_path_tbl);
10704         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10705         return NULL;
10706
10707 }
10708
10709 /**
10710  * Find existing sample resource or create and register a new one.
10711  *
10712  * @param[in, out] dev
10713  *   Pointer to rte_eth_dev structure.
10714  * @param[in] resource
10715  *   Pointer to sample resource.
10716  * @param[in, out] dev_flow
10717  *   Pointer to the dev_flow.
10718  * @param[out] error
10719  *   pointer to error structure.
10720  *
10721  * @return
10722  *   0 on success, a negative errno value otherwise and rte_errno is set.
10723  */
10724 static int
10725 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10726                          struct mlx5_flow_dv_sample_resource *resource,
10727                          struct mlx5_flow *dev_flow,
10728                          struct rte_flow_error *error)
10729 {
10730         struct mlx5_flow_dv_sample_resource *cache_resource;
10731         struct mlx5_cache_entry *entry;
10732         struct mlx5_priv *priv = dev->data->dev_private;
10733         struct mlx5_flow_cb_ctx ctx = {
10734                 .dev = dev,
10735                 .error = error,
10736                 .data = resource,
10737         };
10738
10739         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10740         if (!entry)
10741                 return -rte_errno;
10742         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10743         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10744         dev_flow->dv.sample_res = cache_resource;
10745         return 0;
10746 }
10747
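/*
 * A minimal sketch (illustration only, not driver code) of the
 * find-or-create pattern that both the sample and the destination array
 * resources rely on. "foo" stands for any cached resource type here and
 * is a placeholder, not a real symbol:
 *
 *	struct mlx5_flow_cb_ctx ctx = {
 *		.dev = dev,       // port owning the resource
 *		.error = error,   // filled on failure
 *		.data = resource, // key checked by the match callback
 *	};
 *	struct mlx5_cache_entry *entry;
 *
 *	// Scans the list with the match callback; on a miss the create
 *	// callback allocates a new entry which is then inserted.
 *	entry = mlx5_cache_register(&priv->sh->foo_list, &ctx);
 *	if (!entry)
 *		return -rte_errno;
 *	foo = container_of(entry, typeof(*foo), entry);
 */
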
10748 int
10749 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10750                             struct mlx5_cache_entry *entry, void *cb_ctx)
10751 {
10752         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10753         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10754         struct rte_eth_dev *dev = ctx->dev;
10755         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10756                         container_of(entry, typeof(*cache_resource), entry);
10757         uint32_t idx = 0;
10758
10759         if (resource->num_of_dest == cache_resource->num_of_dest &&
10760             resource->ft_type == cache_resource->ft_type &&
10761             !memcmp((void *)cache_resource->sample_act,
10762                     (void *)resource->sample_act,
10763                    (resource->num_of_dest *
10764                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10765                 /*
10766                  * Existing sample action should release the prepared
10767                  * sub-actions reference counter.
10768                  */
10769                 for (idx = 0; idx < resource->num_of_dest; idx++)
10770                         flow_dv_sample_sub_actions_release(dev,
10771                                         &resource->sample_idx[idx]);
10772                 return 0;
10773         }
10774         return 1;
10775 }
10776
10777 struct mlx5_cache_entry *
10778 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
10779                          struct mlx5_cache_entry *entry __rte_unused,
10780                          void *cb_ctx)
10781 {
10782         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10783         struct rte_eth_dev *dev = ctx->dev;
10784         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10785         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10786         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10787         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10788         struct mlx5_priv *priv = dev->data->dev_private;
10789         struct mlx5_dev_ctx_shared *sh = priv->sh;
10790         struct mlx5_flow_sub_actions_list *sample_act;
10791         struct mlx5dv_dr_domain *domain;
10792         uint32_t idx = 0, res_idx = 0;
10793         struct rte_flow_error *error = ctx->error;
10794         uint64_t action_flags;
10795         int ret;
10796
10797         /* Register new destination array resource. */
10798         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10799                                             &res_idx);
10800         if (!cache_resource) {
10801                 rte_flow_error_set(error, ENOMEM,
10802                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10803                                           NULL,
10804                                           "cannot allocate resource memory");
10805                 return NULL;
10806         }
10807         *cache_resource = *resource;
10808         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10809                 domain = sh->fdb_domain;
10810         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10811                 domain = sh->rx_domain;
10812         else
10813                 domain = sh->tx_domain;
10814         for (idx = 0; idx < resource->num_of_dest; idx++) {
10815                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10816                                  mlx5_malloc(MLX5_MEM_ZERO,
10817                                  sizeof(struct mlx5dv_dr_action_dest_attr),
10818                                  0, SOCKET_ID_ANY);
10819                 if (!dest_attr[idx]) {
10820                         rte_flow_error_set(error, ENOMEM,
10821                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10822                                            NULL,
10823                                            "cannot allocate resource memory");
10824                         goto error;
10825                 }
10826                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10827                 sample_act = &resource->sample_act[idx];
10828                 action_flags = sample_act->action_flags;
10829                 switch (action_flags) {
10830                 case MLX5_FLOW_ACTION_QUEUE:
10831                         dest_attr[idx]->dest = sample_act->dr_queue_action;
10832                         break;
10833                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10834                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10835                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10836                         dest_attr[idx]->dest_reformat->reformat =
10837                                         sample_act->dr_encap_action;
10838                         dest_attr[idx]->dest_reformat->dest =
10839                                         sample_act->dr_port_id_action;
10840                         break;
10841                 case MLX5_FLOW_ACTION_PORT_ID:
10842                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
10843                         break;
10844                 case MLX5_FLOW_ACTION_JUMP:
10845                         dest_attr[idx]->dest = sample_act->dr_jump_action;
10846                         break;
10847                 default:
10848                         rte_flow_error_set(error, EINVAL,
10849                                            RTE_FLOW_ERROR_TYPE_ACTION,
10850                                            NULL,
10851                                            "unsupported actions type");
10852                         goto error;
10853                 }
10854         }
10855         /* Create a dest array action. */
10856         ret = mlx5_os_flow_dr_create_flow_action_dest_array
10857                                                 (domain,
10858                                                  cache_resource->num_of_dest,
10859                                                  dest_attr,
10860                                                  &cache_resource->action);
10861         if (ret) {
10862                 rte_flow_error_set(error, ENOMEM,
10863                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10864                                    NULL,
10865                                    "cannot create destination array action");
10866                 goto error;
10867         }
10868         cache_resource->idx = res_idx;
10869         cache_resource->dev = dev;
10870         for (idx = 0; idx < resource->num_of_dest; idx++)
10871                 mlx5_free(dest_attr[idx]);
10872         return &cache_resource->entry;
10873 error:
10874         for (idx = 0; idx < resource->num_of_dest; idx++) {
10875                 flow_dv_sample_sub_actions_release(dev,
10876                                 &cache_resource->sample_idx[idx]);
10877                 if (dest_attr[idx])
10878                         mlx5_free(dest_attr[idx]);
10879         }
10880
10881         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10882         return NULL;
10883 }
10884
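/*
 * For reference, a sketch (example values, not driver code) of the
 * dest_attr layout that flow_dv_dest_array_create_cb() builds for a
 * two-destination mirror: a plain queue plus a port-id destination with
 * encapsulation:
 *
 *	// dest 0: MLX5_FLOW_ACTION_QUEUE
 *	dest_attr[0]->type = MLX5DV_DR_ACTION_DEST;
 *	dest_attr[0]->dest = sample_act[0].dr_queue_action;
 *	// dest 1: MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP
 *	dest_attr[1]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
 *	dest_attr[1]->dest_reformat = &dest_reformat[1];
 *	dest_attr[1]->dest_reformat->reformat = sample_act[1].dr_encap_action;
 *	dest_attr[1]->dest_reformat->dest = sample_act[1].dr_port_id_action;
 */
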
10885 /**
10886  * Find existing destination array resource or create and register a new one.
10887  *
10888  * @param[in, out] dev
10889  *   Pointer to rte_eth_dev structure.
10890  * @param[in] resource
10891  *   Pointer to destination array resource.
10892  * @param[in, out] dev_flow
10893  *   Pointer to the dev_flow.
10894  * @param[out] error
10895  *   Pointer to the error structure.
10896  *
10897  * @return
10898  *   0 on success, a negative errno value otherwise and rte_errno is set.
10899  */
10900 static int
10901 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10902                          struct mlx5_flow_dv_dest_array_resource *resource,
10903                          struct mlx5_flow *dev_flow,
10904                          struct rte_flow_error *error)
10905 {
10906         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10907         struct mlx5_priv *priv = dev->data->dev_private;
10908         struct mlx5_cache_entry *entry;
10909         struct mlx5_flow_cb_ctx ctx = {
10910                 .dev = dev,
10911                 .error = error,
10912                 .data = resource,
10913         };
10914
10915         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10916         if (!entry)
10917                 return -rte_errno;
10918         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10919         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10920         dev_flow->dv.dest_array_res = cache_resource;
10921         return 0;
10922 }
10923
10924 /**
10925  * Convert Sample action to DV specification.
10926  *
10927  * @param[in] dev
10928  *   Pointer to rte_eth_dev structure.
10929  * @param[in] action
10930  *   Pointer to sample action structure.
10931  * @param[in, out] dev_flow
10932  *   Pointer to the mlx5_flow.
10933  * @param[in] attr
10934  *   Pointer to the flow attributes.
10935  * @param[in, out] num_of_dest
10936  *   Pointer to the number of destinations.
10937  * @param[in, out] sample_actions
10938  *   Pointer to sample actions list.
10939  * @param[in, out] res
10940  *   Pointer to sample resource.
10941  * @param[out] error
10942  *   Pointer to the error structure.
10943  *
10944  * @return
10945  *   0 on success, a negative errno value otherwise and rte_errno is set.
10946  */
10947 static int
10948 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10949                                 const struct rte_flow_action_sample *action,
10950                                 struct mlx5_flow *dev_flow,
10951                                 const struct rte_flow_attr *attr,
10952                                 uint32_t *num_of_dest,
10953                                 void **sample_actions,
10954                                 struct mlx5_flow_dv_sample_resource *res,
10955                                 struct rte_flow_error *error)
10956 {
10957         struct mlx5_priv *priv = dev->data->dev_private;
10958         const struct rte_flow_action *sub_actions;
10959         struct mlx5_flow_sub_actions_list *sample_act;
10960         struct mlx5_flow_sub_actions_idx *sample_idx;
10961         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10962         struct rte_flow *flow = dev_flow->flow;
10963         struct mlx5_flow_rss_desc *rss_desc;
10964         uint64_t action_flags = 0;
10965
10966         MLX5_ASSERT(wks);
10967         rss_desc = &wks->rss_desc;
10968         sample_act = &res->sample_act;
10969         sample_idx = &res->sample_idx;
10970         res->ratio = action->ratio;
10971         sub_actions = action->actions;
10972         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
10973                 int type = sub_actions->type;
10974                 uint32_t pre_rix = 0;
10975                 void *pre_r;
10976                 switch (type) {
10977                 case RTE_FLOW_ACTION_TYPE_QUEUE:
10978                 {
10979                         const struct rte_flow_action_queue *queue;
10980                         struct mlx5_hrxq *hrxq;
10981                         uint32_t hrxq_idx;
10982
10983                         queue = sub_actions->conf;
10984                         rss_desc->queue_num = 1;
10985                         rss_desc->queue[0] = queue->index;
10986                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
10987                                                     rss_desc, &hrxq_idx);
10988                         if (!hrxq)
10989                                 return rte_flow_error_set
10990                                         (error, rte_errno,
10991                                          RTE_FLOW_ERROR_TYPE_ACTION,
10992                                          NULL,
10993                                          "cannot create fate queue");
10994                         sample_act->dr_queue_action = hrxq->action;
10995                         sample_idx->rix_hrxq = hrxq_idx;
10996                         sample_actions[sample_act->actions_num++] =
10997                                                 hrxq->action;
10998                         (*num_of_dest)++;
10999                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11000                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11001                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11002                         dev_flow->handle->fate_action =
11003                                         MLX5_FLOW_FATE_QUEUE;
11004                         break;
11005                 }
11006                 case RTE_FLOW_ACTION_TYPE_RSS:
11007                 {
11008                         struct mlx5_hrxq *hrxq;
11009                         uint32_t hrxq_idx;
11010                         const struct rte_flow_action_rss *rss;
11011                         const uint8_t *rss_key;
11012
11013                         rss = sub_actions->conf;
11014                         memcpy(rss_desc->queue, rss->queue,
11015                                rss->queue_num * sizeof(uint16_t));
11016                         rss_desc->queue_num = rss->queue_num;
11017                         /* NULL RSS key indicates default RSS key. */
11018                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11019                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11020                         /*
11021                          * rss->level and rss->types should be set in advance
11022                          * when expanding items for RSS.
11023                          */
11024                         flow_dv_hashfields_set(dev_flow, rss_desc);
11025                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11026                                                     rss_desc, &hrxq_idx);
11027                         if (!hrxq)
11028                                 return rte_flow_error_set
11029                                         (error, rte_errno,
11030                                          RTE_FLOW_ERROR_TYPE_ACTION,
11031                                          NULL,
11032                                          "cannot create fate queue");
11033                         sample_act->dr_queue_action = hrxq->action;
11034                         sample_idx->rix_hrxq = hrxq_idx;
11035                         sample_actions[sample_act->actions_num++] =
11036                                                 hrxq->action;
11037                         (*num_of_dest)++;
11038                         action_flags |= MLX5_FLOW_ACTION_RSS;
11039                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11040                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11041                         dev_flow->handle->fate_action =
11042                                         MLX5_FLOW_FATE_QUEUE;
11043                         break;
11044                 }
11045                 case RTE_FLOW_ACTION_TYPE_MARK:
11046                 {
11047                         uint32_t tag_be = mlx5_flow_mark_set
11048                                 (((const struct rte_flow_action_mark *)
11049                                 (sub_actions->conf))->id);
11050
11051                         dev_flow->handle->mark = 1;
11052                         pre_rix = dev_flow->handle->dvh.rix_tag;
11053                         /* Save the mark resource before sample */
11054                         pre_r = dev_flow->dv.tag_resource;
11055                         if (flow_dv_tag_resource_register(dev, tag_be,
11056                                                   dev_flow, error))
11057                                 return -rte_errno;
11058                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11059                         sample_act->dr_tag_action =
11060                                 dev_flow->dv.tag_resource->action;
11061                         sample_idx->rix_tag =
11062                                 dev_flow->handle->dvh.rix_tag;
11063                         sample_actions[sample_act->actions_num++] =
11064                                                 sample_act->dr_tag_action;
11065                         /* Recover the mark resource after sample */
11066                         dev_flow->dv.tag_resource = pre_r;
11067                         dev_flow->handle->dvh.rix_tag = pre_rix;
11068                         action_flags |= MLX5_FLOW_ACTION_MARK;
11069                         break;
11070                 }
11071                 case RTE_FLOW_ACTION_TYPE_COUNT:
11072                 {
11073                         if (!flow->counter) {
11074                                 flow->counter =
11075                                         flow_dv_translate_create_counter(dev,
11076                                                 dev_flow, sub_actions->conf,
11077                                                 0);
11078                                 if (!flow->counter)
11079                                         return rte_flow_error_set
11080                                                 (error, rte_errno,
11081                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11082                                                 NULL,
11083                                                 "cannot create counter"
11084                                                 " object.");
11085                         }
11086                         sample_act->dr_cnt_action =
11087                                   (flow_dv_counter_get_by_idx(dev,
11088                                   flow->counter, NULL))->action;
11089                         sample_actions[sample_act->actions_num++] =
11090                                                 sample_act->dr_cnt_action;
11091                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11092                         break;
11093                 }
11094                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11095                 {
11096                         struct mlx5_flow_dv_port_id_action_resource
11097                                         port_id_resource;
11098                         uint32_t port_id = 0;
11099
11100                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11101                         /* Save the port id resource before sample */
11102                         pre_rix = dev_flow->handle->rix_port_id_action;
11103                         pre_r = dev_flow->dv.port_id_action;
11104                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11105                                                              &port_id, error))
11106                                 return -rte_errno;
11107                         port_id_resource.port_id = port_id;
11108                         if (flow_dv_port_id_action_resource_register
11109                             (dev, &port_id_resource, dev_flow, error))
11110                                 return -rte_errno;
11111                         sample_act->dr_port_id_action =
11112                                 dev_flow->dv.port_id_action->action;
11113                         sample_idx->rix_port_id_action =
11114                                 dev_flow->handle->rix_port_id_action;
11115                         sample_actions[sample_act->actions_num++] =
11116                                                 sample_act->dr_port_id_action;
11117                         /* Recover the port id resource after sample */
11118                         dev_flow->dv.port_id_action = pre_r;
11119                         dev_flow->handle->rix_port_id_action = pre_rix;
11120                         (*num_of_dest)++;
11121                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11122                         break;
11123                 }
11124                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11125                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11126                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11127                         /* Save the encap resource before sample */
11128                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11129                         pre_r = dev_flow->dv.encap_decap;
11130                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11131                                                            dev_flow,
11132                                                            attr->transfer,
11133                                                            error))
11134                                 return -rte_errno;
11135                         sample_act->dr_encap_action =
11136                                 dev_flow->dv.encap_decap->action;
11137                         sample_idx->rix_encap_decap =
11138                                 dev_flow->handle->dvh.rix_encap_decap;
11139                         sample_actions[sample_act->actions_num++] =
11140                                                 sample_act->dr_encap_action;
11141                         /* Recover the encap resource after sample */
11142                         dev_flow->dv.encap_decap = pre_r;
11143                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11144                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11145                         break;
11146                 default:
11147                         return rte_flow_error_set(error, EINVAL,
11148                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11149                                 NULL,
11150                                 "unsupported sample sub-action type");
11151                 }
11152         }
11153         sample_act->action_flags = action_flags;
11154         res->ft_id = dev_flow->dv.group;
11155         if (attr->transfer) {
11156                 union {
11157                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11158                         uint64_t set_action;
11159                 } action_ctx = { .set_action = 0 };
11160
11161                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11162                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11163                          MLX5_MODIFICATION_TYPE_SET);
11164                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11165                          MLX5_MODI_META_REG_C_0);
11166                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11167                          priv->vport_meta_tag);
11168                 res->set_action = action_ctx.set_action;
11169         } else if (attr->ingress) {
11170                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11171         } else {
11172                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11173         }
11174         return 0;
11175 }
11176
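/*
 * Application-side view, a minimal sketch using the public rte_flow API
 * (illustrative values; the queue index and ratio are arbitrary): the
 * sub-action list below is what flow_dv_translate_action_sample()
 * receives through the sample configuration.
 *
 *	struct rte_flow_action_queue queue_conf = { .index = 0 };
 *	struct rte_flow_action sub_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_sample sample_conf = {
 *		.ratio = 2,             // sample every 2nd packet
 *		.actions = sub_actions, // fate of the sampled copy
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample_conf },
 *		// ... the flow's own fate action and END follow here.
 *	};
 */
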
11177 /**
11178  * Create the sample action resource, or the destination array for mirroring.
11179  *
11180  * @param[in] dev
11181  *   Pointer to rte_eth_dev structure.
11182  * @param[in, out] dev_flow
11183  *   Pointer to the mlx5_flow.
11184  * @param[in] num_of_dest
11185  *   The number of destinations.
11186  * @param[in, out] res
11187  *   Pointer to sample resource.
11188  * @param[in, out] mdest_res
11189  *   Pointer to destination array resource.
11190  * @param[in] sample_actions
11191  *   Pointer to sample path actions list.
11192  * @param[in] action_flags
11193  *   Holds the actions detected until now.
11194  * @param[out] error
11195  *   Pointer to the error structure.
11196  *
11197  * @return
11198  *   0 on success, a negative errno value otherwise and rte_errno is set.
11199  */
11200 static int
11201 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11202                              struct mlx5_flow *dev_flow,
11203                              uint32_t num_of_dest,
11204                              struct mlx5_flow_dv_sample_resource *res,
11205                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11206                              void **sample_actions,
11207                              uint64_t action_flags,
11208                              struct rte_flow_error *error)
11209 {
11210         /* update normal path action resource into last index of array */
11211         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11212         struct mlx5_flow_sub_actions_list *sample_act =
11213                                         &mdest_res->sample_act[dest_index];
11214         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11215         struct mlx5_flow_rss_desc *rss_desc;
11216         uint32_t normal_idx = 0;
11217         struct mlx5_hrxq *hrxq;
11218         uint32_t hrxq_idx;
11219
11220         MLX5_ASSERT(wks);
11221         rss_desc = &wks->rss_desc;
11222         if (num_of_dest > 1) {
11223                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11224                         /* Handle QP action for mirroring */
11225                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11226                                                     rss_desc, &hrxq_idx);
11227                         if (!hrxq)
11228                                 return rte_flow_error_set
11229                                      (error, rte_errno,
11230                                       RTE_FLOW_ERROR_TYPE_ACTION,
11231                                       NULL,
11232                                       "cannot create rx queue");
11233                         normal_idx++;
11234                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11235                         sample_act->dr_queue_action = hrxq->action;
11236                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11237                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11238                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11239                 }
11240                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11241                         normal_idx++;
11242                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11243                                 dev_flow->handle->dvh.rix_encap_decap;
11244                         sample_act->dr_encap_action =
11245                                 dev_flow->dv.encap_decap->action;
11246                         dev_flow->handle->dvh.rix_encap_decap = 0;
11247                 }
11248                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11249                         normal_idx++;
11250                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11251                                 dev_flow->handle->rix_port_id_action;
11252                         sample_act->dr_port_id_action =
11253                                 dev_flow->dv.port_id_action->action;
11254                         dev_flow->handle->rix_port_id_action = 0;
11255                 }
11256                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11257                         normal_idx++;
11258                         mdest_res->sample_idx[dest_index].rix_jump =
11259                                 dev_flow->handle->rix_jump;
11260                         sample_act->dr_jump_action =
11261                                 dev_flow->dv.jump->action;
11262                         dev_flow->handle->rix_jump = 0;
11263                 }
11264                 sample_act->actions_num = normal_idx;
11265                 /* update sample action resource into first index of array */
11266                 mdest_res->ft_type = res->ft_type;
11267                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11268                                 sizeof(struct mlx5_flow_sub_actions_idx));
11269                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11270                                 sizeof(struct mlx5_flow_sub_actions_list));
11271                 mdest_res->num_of_dest = num_of_dest;
11272                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11273                                                          dev_flow, error))
11274                         return rte_flow_error_set(error, EINVAL,
11275                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11276                                                   NULL, "can't create sample "
11277                                                   "action");
11278         } else {
11279                 res->sub_actions = sample_actions;
11280                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11281                         return rte_flow_error_set(error, EINVAL,
11282                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11283                                                   NULL,
11284                                                   "can't create sample action");
11285         }
11286         return 0;
11287 }
11288
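/*
 * Resulting layout of mdest_res for the mirroring case (num_of_dest > 1),
 * summarized for reference:
 *
 *	sample_act[0]                     // copy of the sample path actions
 *	sample_act[MLX5_MAX_DEST_NUM - 1] // normal (default) path actions
 *
 * With a single destination only the sample resource is registered and
 * the normal path keeps the regular flow fate action.
 */
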
11289 /**
11290  * Remove an ASO age action from age actions list.
11291  *
11292  * @param[in] dev
11293  *   Pointer to the Ethernet device structure.
11294  * @param[in] age
11295  *   Pointer to the aso age action handler.
11296  */
11297 static void
11298 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11299                                 struct mlx5_aso_age_action *age)
11300 {
11301         struct mlx5_age_info *age_info;
11302         struct mlx5_age_param *age_param = &age->age_params;
11303         struct mlx5_priv *priv = dev->data->dev_private;
11304         uint16_t expected = AGE_CANDIDATE;
11305
11306         age_info = GET_PORT_AGE_INFO(priv);
11307         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11308                                          AGE_FREE, false, __ATOMIC_RELAXED,
11309                                          __ATOMIC_RELAXED)) {
11310                 /*
11311                  * We need the lock even if it is an age timeout,
11312                  * since the age action may still be in process.
11313                  */
11314                 rte_spinlock_lock(&age_info->aged_sl);
11315                 LIST_REMOVE(age, next);
11316                 rte_spinlock_unlock(&age_info->aged_sl);
11317                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11318         }
11319 }
11320
11321 /**
11322  * Release an ASO age action.
11323  *
11324  * @param[in] dev
11325  *   Pointer to the Ethernet device structure.
11326  * @param[in] age_idx
11327  *   Index of ASO age action to release.
11331  *
11332  * @return
11333  *   0 when age action was removed, otherwise the number of references.
11334  */
11335 static int
11336 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11337 {
11338         struct mlx5_priv *priv = dev->data->dev_private;
11339         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11340         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11341         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11342
11343         if (!ret) {
11344                 flow_dv_aso_age_remove_from_age(dev, age);
11345                 rte_spinlock_lock(&mng->free_sl);
11346                 LIST_INSERT_HEAD(&mng->free, age, next);
11347                 rte_spinlock_unlock(&mng->free_sl);
11348         }
11349         return ret;
11350 }
11351
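/*
 * Reference-counting note for the release above: refcnt is stored as 1
 * when the action is allocated, each additional user adds a reference,
 * and the action returns to the free list only when the last reference
 * is dropped (a non-zero return value is the remaining count).
 */
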
11352 /**
11353  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11354  *
11355  * @param[in] dev
11356  *   Pointer to the Ethernet device structure.
11357  *
11358  * @return
11359  *   0 on success, otherwise negative errno value and rte_errno is set.
11360  */
11361 static int
11362 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11363 {
11364         struct mlx5_priv *priv = dev->data->dev_private;
11365         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11366         void *old_pools = mng->pools;
11367         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11368         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11369         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11370
11371         if (!pools) {
11372                 rte_errno = ENOMEM;
11373                 return -ENOMEM;
11374         }
11375         if (old_pools) {
11376                 memcpy(pools, old_pools,
11377                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11378                 mlx5_free(old_pools);
11379         } else {
11380                 /* First ASO flow hit allocation - starting ASO data-path. */
11381                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11382
11383                 if (ret) {
11384                         mlx5_free(pools);
11385                         return ret;
11386                 }
11387         }
11388         mng->n = resize;
11389         mng->pools = pools;
11390         return 0;
11391 }
11392
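/*
 * Growth example for the resize above: the pool pointer array grows
 * linearly, n -> n + MLX5_CNT_CONTAINER_RESIZE, and the existing pool
 * pointers are copied over, so indexes of already allocated pools stay
 * valid across resizes.
 */
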
11393 /**
11394  * Create and initialize a new ASO aging pool.
11395  *
11396  * @param[in] dev
11397  *   Pointer to the Ethernet device structure.
11398  * @param[out] age_free
11399  *   Where to put the pointer of a new age action.
11400  *
11401  * @return
11402  *   The age actions pool pointer and @p age_free is set on success,
11403  *   NULL otherwise and rte_errno is set.
11404  */
11405 static struct mlx5_aso_age_pool *
11406 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11407                         struct mlx5_aso_age_action **age_free)
11408 {
11409         struct mlx5_priv *priv = dev->data->dev_private;
11410         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11411         struct mlx5_aso_age_pool *pool = NULL;
11412         struct mlx5_devx_obj *obj = NULL;
11413         uint32_t i;
11414
11415         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11416                                                     priv->sh->pdn);
11417         if (!obj) {
11418                 rte_errno = ENODATA;
11419                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11420                 return NULL;
11421         }
11422         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11423         if (!pool) {
11424                 claim_zero(mlx5_devx_cmd_destroy(obj));
11425                 rte_errno = ENOMEM;
11426                 return NULL;
11427         }
11428         pool->flow_hit_aso_obj = obj;
11429         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11430         rte_spinlock_lock(&mng->resize_sl);
11431         pool->index = mng->next;
11432         /* Resize pools array if there is no room for the new pool in it. */
11433         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11434                 claim_zero(mlx5_devx_cmd_destroy(obj));
11435                 mlx5_free(pool);
11436                 rte_spinlock_unlock(&mng->resize_sl);
11437                 return NULL;
11438         }
11439         mng->pools[pool->index] = pool;
11440         mng->next++;
11441         rte_spinlock_unlock(&mng->resize_sl);
11442         /* Assign the first action in the new pool, the rest go to free list. */
11443         *age_free = &pool->actions[0];
11444         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11445                 pool->actions[i].offset = i;
11446                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11447         }
11448         return pool;
11449 }
11450
11451 /**
11452  * Allocate an ASO aging bit.
11453  *
11454  * @param[in] dev
11455  *   Pointer to the Ethernet device structure.
11456  * @param[out] error
11457  *   Pointer to the error structure.
11458  *
11459  * @return
11460  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11461  */
11462 static uint32_t
11463 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11464 {
11465         struct mlx5_priv *priv = dev->data->dev_private;
11466         const struct mlx5_aso_age_pool *pool;
11467         struct mlx5_aso_age_action *age_free = NULL;
11468         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11469
11470         MLX5_ASSERT(mng);
11471         /* Try to get the next free age action bit. */
11472         rte_spinlock_lock(&mng->free_sl);
11473         age_free = LIST_FIRST(&mng->free);
11474         if (age_free) {
11475                 LIST_REMOVE(age_free, next);
11476         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11477                 rte_spinlock_unlock(&mng->free_sl);
11478                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11479                                    NULL, "failed to create ASO age pool");
11480                 return 0; /* 0 is an error. */
11481         }
11482         rte_spinlock_unlock(&mng->free_sl);
11483         pool = container_of
11484           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11485                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11486                                                                        actions);
11487         if (!age_free->dr_action) {
11488                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11489                                                  error);
11490
11491                 if (reg_c < 0) {
11492                         rte_flow_error_set(error, rte_errno,
11493                                            RTE_FLOW_ERROR_TYPE_ACTION,
11494                                            NULL, "failed to get reg_c "
11495                                            "for ASO flow hit");
11496                         return 0; /* 0 is an error. */
11497                 }
11498 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11499                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11500                                 (priv->sh->rx_domain,
11501                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11502                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11503                                  (reg_c - REG_C_0));
11504 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11505                 if (!age_free->dr_action) {
11506                         rte_errno = errno;
11507                         rte_spinlock_lock(&mng->free_sl);
11508                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11509                         rte_spinlock_unlock(&mng->free_sl);
11510                         rte_flow_error_set(error, rte_errno,
11511                                            RTE_FLOW_ERROR_TYPE_ACTION,
11512                                            NULL, "failed to create ASO "
11513                                            "flow hit action");
11514                         return 0; /* 0 is an error. */
11515                 }
11516         }
11517         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11518         return pool->index | ((age_free->offset + 1) << 16);
11519 }
11520
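/*
 * The returned index packs the pool index into the low 16 bits and the
 * in-pool offset, biased by one so that 0 can signal failure, into the
 * upper bits. A worked example:
 *
 *	pool->index = 2, age_free->offset = 5
 *	index = 2 | ((5 + 1) << 16) = 0x00060002
 */
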
11521 /**
11522  * Initialize flow ASO age parameters.
11523  *
11524  * @param[in] dev
11525  *   Pointer to rte_eth_dev structure.
11526  * @param[in] age_idx
11527  *   Index of ASO age action.
11528  * @param[in] context
11529  *   Pointer to flow counter age context.
11530  * @param[in] timeout
11531  *   Aging timeout in seconds.
11532  *
11533  */
11534 static void
11535 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11536                             uint32_t age_idx,
11537                             void *context,
11538                             uint32_t timeout)
11539 {
11540         struct mlx5_aso_age_action *aso_age;
11541
11542         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11543         MLX5_ASSERT(aso_age);
11544         aso_age->age_params.context = context;
11545         aso_age->age_params.timeout = timeout;
11546         aso_age->age_params.port_id = dev->data->port_id;
11547         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11548                          __ATOMIC_RELAXED);
11549         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11550                          __ATOMIC_RELAXED);
11551 }
11552
11553 static void
11554 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11555                                const struct rte_flow_item_integrity *value,
11556                                void *headers_m, void *headers_v)
11557 {
11558         if (mask->l4_ok) {
11559                 /* The application l4_ok filter aggregates all hardware l4 filters,
11560                  * therefore hw l4_checksum_ok must be implicitly added here.
11561                  */
11562                 struct rte_flow_item_integrity local_item;
11563
11564                 local_item.l4_csum_ok = 1;
11565                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11566                          local_item.l4_csum_ok);
11567                 if (value->l4_ok) {
11568                         /* An application l4_ok = 1 match sets both hw
11569                          * flags, l4_ok and l4_checksum_ok, to 1.
11570                          */
11571                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11572                                  l4_checksum_ok, local_item.l4_csum_ok);
11573                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11574                                  mask->l4_ok);
11575                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11576                                  value->l4_ok);
11577                 } else {
11578                         /* application l4_ok = 0 matches on hw flag
11579                          * l4_checksum_ok = 0 only.
11580                          */
11581                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11582                                  l4_checksum_ok, 0);
11583                 }
11584         } else if (mask->l4_csum_ok) {
11585                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11586                          mask->l4_csum_ok);
11587                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
11588                          value->l4_csum_ok);
11589         }
11590 }
11591
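/*
 * Summary of the l4 translation above, where mask/value are the
 * application integrity item fields and l4_ok/l4_checksum_ok the
 * hardware match flags:
 *
 *	mask.l4_ok = 1, value.l4_ok = 1 -> match l4_ok == 1 && l4_checksum_ok == 1
 *	mask.l4_ok = 1, value.l4_ok = 0 -> match l4_checksum_ok == 0 only
 *	mask.l4_csum_ok = 1, l4_ok = 0  -> match l4_checksum_ok == value.l4_csum_ok
 */
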
11592 static void
11593 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
11594                                const struct rte_flow_item_integrity *value,
11595                                void *headers_m, void *headers_v,
11596                                bool is_ipv4)
11597 {
11598         if (mask->l3_ok) {
11599                 /* The application l3_ok filter aggregates all hardware l3 filters,
11600                  * therefore hw ipv4_checksum_ok must be implicitly added here.
11601                  */
11602                 struct rte_flow_item_integrity local_item;
11603
11604                 local_item.ipv4_csum_ok = !!is_ipv4;
11605                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11606                          local_item.ipv4_csum_ok);
11607                 if (value->l3_ok) {
11608                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11609                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
11610                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
11611                                  mask->l3_ok);
11612                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
11613                                  value->l3_ok);
11614                 } else {
11615                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11616                                  ipv4_checksum_ok, 0);
11617                 }
11618         } else if (mask->ipv4_csum_ok) {
11619                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11620                          mask->ipv4_csum_ok);
11621                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11622                          value->ipv4_csum_ok);
11623         }
11624 }
11625
11626 static void
11627 flow_dv_translate_item_integrity(void *matcher, void *key,
11628                                  const struct rte_flow_item *head_item,
11629                                  const struct rte_flow_item *integrity_item)
11630 {
11631         const struct rte_flow_item_integrity *mask = integrity_item->mask;
11632         const struct rte_flow_item_integrity *value = integrity_item->spec;
11633         const struct rte_flow_item *tunnel_item, *end_item, *item;
11634         void *headers_m;
11635         void *headers_v;
11636         uint32_t l3_protocol;
11637
11638         if (!value)
11639                 return;
11640         if (!mask)
11641                 mask = &rte_flow_item_integrity_mask;
11642         if (value->level > 1) {
11643                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11644                                          inner_headers);
11645                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
11646         } else {
11647                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11648                                          outer_headers);
11649                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
11650         }
11651         tunnel_item = mlx5_flow_find_tunnel_item(head_item);
11652         if (value->level > 1) {
11653                 /* tunnel item was verified during the item validation */
11654                 item = tunnel_item;
11655                 end_item = mlx5_find_end_item(tunnel_item);
11656         } else {
11657                 item = head_item;
11658                 end_item = tunnel_item ? tunnel_item :
11659                            mlx5_find_end_item(integrity_item);
11660         }
11661         l3_protocol = mask->l3_ok ?
11662                       mlx5_flow_locate_proto_l3(&item, end_item) : 0;
11663         flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
11664                                        l3_protocol == RTE_ETHER_TYPE_IPV4);
11665         flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
11666 }
11667
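/*
 * Application-side sketch using the public rte_flow API (illustrative
 * pattern, not driver code): match packets whose outermost L3/L4 parts
 * passed the hardware integrity checks.
 *
 *	struct rte_flow_item_integrity spec = {
 *		.level = 0, // outermost headers
 *		.l3_ok = 1,
 *		.l4_ok = 1,
 *	};
 *	struct rte_flow_item_integrity mask = { .l3_ok = 1, .l4_ok = 1 };
 *	struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
 *		  .spec = &spec, .mask = &mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
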
11668 /**
11669  * Prepares DV flow counter with aging configuration.
11670  * Gets it by index when exists, creates a new one when doesn't.
11671  *
11672  * @param[in] dev
11673  *   Pointer to rte_eth_dev structure.
11674  * @param[in] dev_flow
11675  *   Pointer to the mlx5_flow.
11676  * @param[in, out] flow
11677  *   Pointer to the sub flow.
11678  * @param[in] count
11679  *   Pointer to the counter action configuration.
11680  * @param[in] age
11681  *   Pointer to the aging action configuration.
11682  * @param[out] error
11683  *   Pointer to the error structure.
11684  *
11685  * @return
11686  *   Pointer to the counter, NULL otherwise.
11687  */
11688 static struct mlx5_flow_counter *
11689 flow_dv_prepare_counter(struct rte_eth_dev *dev,
11690                         struct mlx5_flow *dev_flow,
11691                         struct rte_flow *flow,
11692                         const struct rte_flow_action_count *count,
11693                         const struct rte_flow_action_age *age,
11694                         struct rte_flow_error *error)
11695 {
11696         if (!flow->counter) {
11697                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
11698                                                                  count, age);
11699                 if (!flow->counter) {
11700                         rte_flow_error_set(error, rte_errno,
11701                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11702                                            "cannot create counter object.");
11703                         return NULL;
11704                 }
11705         }
11706         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11707 }
11708
11709 /*
11710  * Release an ASO CT action via its owner device.
11711  *
11712  * @param[in] dev
11713  *   Pointer to the Ethernet device structure.
11714  * @param[in] idx
11715  *   Index of ASO CT action to release.
11716  *
11717  * @return
11718  *   0 when CT action was removed, otherwise the number of references.
11719  */
11720 static inline int
11721 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
11722 {
11723         struct mlx5_priv *priv = dev->data->dev_private;
11724         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11725         uint32_t ret;
11726         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
11727         enum mlx5_aso_ct_state state =
11728                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
11729
11730         /* Cannot release when CT is in the ASO SQ. */
11731         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
11732                 return -1;
11733         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
11734         if (!ret) {
11735                 if (ct->dr_action_orig) {
11736 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11737                         claim_zero(mlx5_glue->destroy_flow_action
11738                                         (ct->dr_action_orig));
11739 #endif
11740                         ct->dr_action_orig = NULL;
11741                 }
11742                 if (ct->dr_action_rply) {
11743 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11744                         claim_zero(mlx5_glue->destroy_flow_action
11745                                         (ct->dr_action_rply));
11746 #endif
11747                         ct->dr_action_rply = NULL;
11748                 }
11749                 /* Clear the state to free; not needed on the first allocation. */
11750                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
11751                 rte_spinlock_lock(&mng->ct_sl);
11752                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
11753                 rte_spinlock_unlock(&mng->ct_sl);
11754         }
11755         return (int)ret;
11756 }
11757
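/*
 * CT action lifecycle assumed by the release check above (state names
 * from enum mlx5_aso_ct_state):
 *
 *	FREE -> allocated -> WAIT (profile write posted to the ASO SQ)
 *	WAIT -> READY (write completion polled)
 *	READY -> QUERY (query in flight) -> READY
 *
 * Release is refused in WAIT and QUERY because the hardware may still
 * access the object through the ASO queue.
 */
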
11758 static inline int
11759 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
11760 {
11761         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
11762         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
11763         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
11765
11766         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
11767         if (dev->data->dev_started != 1)
11768                 return -1;
11769         return flow_dv_aso_ct_dev_release(owndev, idx);
11770 }
11771
11772 /*
11773  * Resize the ASO CT pools array by 64 pools.
11774  *
11775  * @param[in] dev
11776  *   Pointer to the Ethernet device structure.
11777  *
11778  * @return
11779  *   0 on success, otherwise negative errno value and rte_errno is set.
11780  */
11781 static int
11782 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
11783 {
11784         struct mlx5_priv *priv = dev->data->dev_private;
11785         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11786         void *old_pools = mng->pools;
11787         /* Magic number now, need a macro. */
11788         uint32_t resize = mng->n + 64;
11789         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
11790         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11791
11792         if (!pools) {
11793                 rte_errno = ENOMEM;
11794                 return -rte_errno;
11795         }
11796         rte_rwlock_write_lock(&mng->resize_rwl);
11797         /* ASO SQ/QP was already initialized at startup. */
11798         if (old_pools) {
11799                 /* Realloc could be an alternative choice. */
11800                 rte_memcpy(pools, old_pools,
11801                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
11802                 mlx5_free(old_pools);
11803         }
11804         mng->n = resize;
11805         mng->pools = pools;
11806         rte_rwlock_write_unlock(&mng->resize_rwl);
11807         return 0;
11808 }
11809
11810 /*
11811  * Create and initialize a new ASO CT pool.
11812  *
11813  * @param[in] dev
11814  *   Pointer to the Ethernet device structure.
11815  * @param[out] ct_free
11816  *   Where to put the pointer of a new CT action.
11817  *
11818  * @return
11819  *   The CT actions pool pointer and @p ct_free is set on success,
11820  *   NULL otherwise and rte_errno is set.
11821  */
11822 static struct mlx5_aso_ct_pool *
11823 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
11824                        struct mlx5_aso_ct_action **ct_free)
11825 {
11826         struct mlx5_priv *priv = dev->data->dev_private;
11827         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11828         struct mlx5_aso_ct_pool *pool = NULL;
11829         struct mlx5_devx_obj *obj = NULL;
11830         uint32_t i;
11831         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
11832
11833         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
11834                                                 priv->sh->pdn, log_obj_size);
11835         if (!obj) {
11836                 rte_errno = ENODATA;
11837                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
11838                 return NULL;
11839         }
11840         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11841         if (!pool) {
11842                 rte_errno = ENOMEM;
11843                 claim_zero(mlx5_devx_cmd_destroy(obj));
11844                 return NULL;
11845         }
11846         pool->devx_obj = obj;
11847         pool->index = mng->next;
11848         /* Resize pools array if there is no room for the new pool in it. */
11849         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
11850                 claim_zero(mlx5_devx_cmd_destroy(obj));
11851                 mlx5_free(pool);
11852                 return NULL;
11853         }
11854         mng->pools[pool->index] = pool;
11855         mng->next++;
11856         /* Assign the first action in the new pool, the rest go to free list. */
11857         *ct_free = &pool->actions[0];
11858         /* Lock outside, the list operation is safe here. */
11859         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
11860                 /* refcnt is 0 when allocating the memory. */
11861                 pool->actions[i].offset = i;
11862                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
11863         }
11864         return pool;
11865 }
11866
11867 /*
11868  * Allocate an ASO CT action from the free list.
11869  *
11870  * @param[in] dev
11871  *   Pointer to the Ethernet device structure.
11872  * @param[out] error
11873  *   Pointer to the error structure.
11874  *
11875  * @return
11876  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
11877  */
11878 static uint32_t
11879 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11880 {
11881         struct mlx5_priv *priv = dev->data->dev_private;
11882         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11883         struct mlx5_aso_ct_action *ct = NULL;
11884         struct mlx5_aso_ct_pool *pool;
11885         uint8_t reg_c;
11886         uint32_t ct_idx;
11887
11888         MLX5_ASSERT(mng);
11889         if (!priv->config.devx) {
11890                 rte_errno = ENOTSUP;
11891                 return 0;
11892         }
11893         /* Get a free CT action, if no, a new pool will be created. */
11894         rte_spinlock_lock(&mng->ct_sl);
11895         ct = LIST_FIRST(&mng->free_cts);
11896         if (ct) {
11897                 LIST_REMOVE(ct, next);
11898         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
11899                 rte_spinlock_unlock(&mng->ct_sl);
11900                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11901                                    NULL, "failed to create ASO CT pool");
11902                 return 0;
11903         }
11904         rte_spinlock_unlock(&mng->ct_sl);
11905         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
11906         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
11907         /* 0: inactive, 1: created, 2+: used by flows. */
11908         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
11909         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
11910         if (!ct->dr_action_orig) {
11911 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11912                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
11913                         (priv->sh->rx_domain, pool->devx_obj->obj,
11914                          ct->offset,
11915                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
11916                          reg_c - REG_C_0);
11917 #else
11918                 RTE_SET_USED(reg_c);
11919 #endif
11920                 if (!ct->dr_action_orig) {
11921                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11922                         rte_flow_error_set(error, rte_errno,
11923                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11924                                            "failed to create ASO CT action");
11925                         return 0;
11926                 }
11927         }
11928         if (!ct->dr_action_rply) {
11929 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11930                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
11931                         (priv->sh->rx_domain, pool->devx_obj->obj,
11932                          ct->offset,
11933                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
11934                          reg_c - REG_C_0);
11935 #endif
11936                 if (!ct->dr_action_rply) {
11937                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11938                         rte_flow_error_set(error, rte_errno,
11939                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11940                                            "failed to create ASO CT action");
11941                         return 0;
11942                 }
11943         }
11944         return ct_idx;
11945 }
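/*
 * Error-path convention in the allocator above, sketched: once ct_idx is
 * composed, any later failure must be unwound through
 * flow_dv_aso_ct_dev_release(), which returns the action to the free list,
 * rather than by freeing anything directly ("setup_failed" is a
 * placeholder condition):
 *
 *   uint32_t idx = flow_dv_aso_ct_alloc(dev, error);
 *   if (idx != 0 && setup_failed)
 *           flow_dv_aso_ct_dev_release(dev, idx);
 */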
11946
11947 /**
11948  * Create a conntrack object with context and actions by using the ASO mechanism.
11949  *
11950  * @param[in] dev
11951  *   Pointer to rte_eth_dev structure.
11952  * @param[in] pro
11953  *   Pointer to conntrack information profile.
11954  * @param[out] error
11955  *   Pointer to the error structure.
11956  *
11957  * @return
11958  *   Index to conntrack object on success, 0 otherwise.
11959  */
11960 static uint32_t
11961 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
11962                                    const struct rte_flow_action_conntrack *pro,
11963                                    struct rte_flow_error *error)
11964 {
11965         struct mlx5_priv *priv = dev->data->dev_private;
11966         struct mlx5_dev_ctx_shared *sh = priv->sh;
11967         struct mlx5_aso_ct_action *ct;
11968         uint32_t idx;
11969
11970         if (!sh->ct_aso_en)
11971                 return rte_flow_error_set(error, ENOTSUP,
11972                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11973                                           "Connection tracking is not supported");
11974         idx = flow_dv_aso_ct_alloc(dev, error);
11975         if (!idx)
11976                 return rte_flow_error_set(error, rte_errno,
11977                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11978                                           "Failed to allocate CT object");
11979         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
11980         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
11981                 return rte_flow_error_set(error, EBUSY,
11982                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11983                                           "Failed to update CT");
11984         ct->is_original = !!pro->is_original_dir;
11985         ct->peer = pro->peer_port;
11986         return idx;
11987 }
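/*
 * How an application typically reaches this path -- a hedged sketch using
 * the generic indirect action API; only the two profile members consumed
 * above are shown and the remaining fields keep their defaults:
 *
 *   struct rte_flow_action_conntrack pro = {
 *           .peer_port = 1,       (mirrored into ct->peer)
 *           .is_original_dir = 1, (selects dr_action_orig later)
 *   };
 *   struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
 *           .conf = &pro,
 *   };
 *   struct rte_flow_action_handle *h =
 *           rte_flow_action_handle_create(port_id, &conf, &act, &err);
 */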
11988
11989 /**
11990  * Fill the flow with DV spec, lock free
11991  * (the mutex should be acquired by the caller).
11992  *
11993  * @param[in] dev
11994  *   Pointer to rte_eth_dev structure.
11995  * @param[in, out] dev_flow
11996  *   Pointer to the sub flow.
11997  * @param[in] attr
11998  *   Pointer to the flow attributes.
11999  * @param[in] items
12000  *   Pointer to the list of items.
12001  * @param[in] actions
12002  *   Pointer to the list of actions.
12003  * @param[out] error
12004  *   Pointer to the error structure.
12005  *
12006  * @return
12007  *   0 on success, a negative errno value otherwise and rte_errno is set.
12008  */
12009 static int
12010 flow_dv_translate(struct rte_eth_dev *dev,
12011                   struct mlx5_flow *dev_flow,
12012                   const struct rte_flow_attr *attr,
12013                   const struct rte_flow_item items[],
12014                   const struct rte_flow_action actions[],
12015                   struct rte_flow_error *error)
12016 {
12017         struct mlx5_priv *priv = dev->data->dev_private;
12018         struct mlx5_dev_config *dev_conf = &priv->config;
12019         struct rte_flow *flow = dev_flow->flow;
12020         struct mlx5_flow_handle *handle = dev_flow->handle;
12021         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12022         struct mlx5_flow_rss_desc *rss_desc;
12023         uint64_t item_flags = 0;
12024         uint64_t last_item = 0;
12025         uint64_t action_flags = 0;
12026         struct mlx5_flow_dv_matcher matcher = {
12027                 .mask = {
12028                         .size = sizeof(matcher.mask.buf) -
12029                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
12030                 },
12031         };
12032         int actions_n = 0;
12033         bool actions_end = false;
12034         union {
12035                 struct mlx5_flow_dv_modify_hdr_resource res;
12036                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12037                             sizeof(struct mlx5_modification_cmd) *
12038                             (MLX5_MAX_MODIFY_NUM + 1)];
12039         } mhdr_dummy;
12040         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12041         const struct rte_flow_action_count *count = NULL;
12042         const struct rte_flow_action_age *non_shared_age = NULL;
12043         union flow_dv_attr flow_attr = { .attr = 0 };
12044         uint32_t tag_be;
12045         union mlx5_flow_tbl_key tbl_key;
12046         uint32_t modify_action_position = UINT32_MAX;
12047         void *match_mask = matcher.mask.buf;
12048         void *match_value = dev_flow->dv.value.buf;
12049         uint8_t next_protocol = 0xff;
12050         struct rte_vlan_hdr vlan = { 0 };
12051         struct mlx5_flow_dv_dest_array_resource mdest_res;
12052         struct mlx5_flow_dv_sample_resource sample_res;
12053         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12054         const struct rte_flow_action_sample *sample = NULL;
12055         struct mlx5_flow_sub_actions_list *sample_act;
12056         uint32_t sample_act_pos = UINT32_MAX;
12057         uint32_t age_act_pos = UINT32_MAX;
12058         uint32_t num_of_dest = 0;
12059         int tmp_actions_n = 0;
12060         uint32_t table;
12061         int ret = 0;
12062         const struct mlx5_flow_tunnel *tunnel = NULL;
12063         struct flow_grp_info grp_info = {
12064                 .external = !!dev_flow->external,
12065                 .transfer = !!attr->transfer,
12066                 .fdb_def_rule = !!priv->fdb_def_rule,
12067                 .skip_scale = dev_flow->skip_scale &
12068                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12069                 .std_tbl_fix = true,
12070         };
12071         const struct rte_flow_item *head_item = items;
12072
12073         if (!wks)
12074                 return rte_flow_error_set(error, ENOMEM,
12075                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12076                                           NULL,
12077                                           "failed to push flow workspace");
12078         rss_desc = &wks->rss_desc;
12079         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12080         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12081         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12082                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12083         /* Update the normal path action resource to the last index of the array. */
12084         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12085         if (is_tunnel_offload_active(dev)) {
12086                 if (dev_flow->tunnel) {
12087                         RTE_VERIFY(dev_flow->tof_type ==
12088                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12089                         tunnel = dev_flow->tunnel;
12090                 } else {
12091                         tunnel = mlx5_get_tof(items, actions,
12092                                               &dev_flow->tof_type);
12093                         dev_flow->tunnel = tunnel;
12094                 }
12095                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12096                                         (dev, attr, tunnel, dev_flow->tof_type);
12097         }
12100         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12101                                        &grp_info, error);
12102         if (ret)
12103                 return ret;
12104         dev_flow->dv.group = table;
12105         if (attr->transfer)
12106                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12107         /* The number of actions must be reset to 0 in case of a dirty stack. */
12108         mhdr_res->actions_num = 0;
12109         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12110                 /*
12111                  * Do not add a decap action if the match rule drops packets:
12112                  * HW rejects rules that combine decap & drop.
12113                  *
12114                  * If the tunnel match rule was inserted before the matching
12115                  * tunnel set rule, the flow table used in the match rule must
12116                  * be registered. The current implementation handles that in
12117                  * flow_dv_matcher_register() at the function end.
12118                  */
12119                 bool add_decap = true;
12120                 const struct rte_flow_action *ptr = actions;
12121
12122                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12123                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12124                                 add_decap = false;
12125                                 break;
12126                         }
12127                 }
12128                 if (add_decap) {
12129                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12130                                                            attr->transfer,
12131                                                            error))
12132                                 return -rte_errno;
12133                         dev_flow->dv.actions[actions_n++] =
12134                                         dev_flow->dv.encap_decap->action;
12135                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12136                 }
12137         }
12138         for (; !actions_end ; actions++) {
12139                 const struct rte_flow_action_queue *queue;
12140                 const struct rte_flow_action_rss *rss;
12141                 const struct rte_flow_action *action = actions;
12142                 const uint8_t *rss_key;
12143                 struct mlx5_flow_tbl_resource *tbl;
12144                 struct mlx5_aso_age_action *age_act;
12145                 struct mlx5_flow_counter *cnt_act;
12146                 uint32_t port_id = 0;
12147                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12148                 int action_type = actions->type;
12149                 const struct rte_flow_action *found_action = NULL;
12150                 uint32_t jump_group = 0;
12151                 uint32_t owner_idx;
12152                 struct mlx5_aso_ct_action *ct;
12153
12154                 if (!mlx5_flow_os_action_supported(action_type))
12155                         return rte_flow_error_set(error, ENOTSUP,
12156                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12157                                                   actions,
12158                                                   "action not supported");
12159                 switch (action_type) {
12160                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12161                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12162                         break;
12163                 case RTE_FLOW_ACTION_TYPE_VOID:
12164                         break;
12165                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12166                         if (flow_dv_translate_action_port_id(dev, action,
12167                                                              &port_id, error))
12168                                 return -rte_errno;
12169                         port_id_resource.port_id = port_id;
12170                         MLX5_ASSERT(!handle->rix_port_id_action);
12171                         if (flow_dv_port_id_action_resource_register
12172                             (dev, &port_id_resource, dev_flow, error))
12173                                 return -rte_errno;
12174                         dev_flow->dv.actions[actions_n++] =
12175                                         dev_flow->dv.port_id_action->action;
12176                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12177                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12178                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12179                         num_of_dest++;
12180                         break;
12181                 case RTE_FLOW_ACTION_TYPE_FLAG:
12182                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12183                         dev_flow->handle->mark = 1;
12184                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12185                                 struct rte_flow_action_mark mark = {
12186                                         .id = MLX5_FLOW_MARK_DEFAULT,
12187                                 };
12188
12189                                 if (flow_dv_convert_action_mark(dev, &mark,
12190                                                                 mhdr_res,
12191                                                                 error))
12192                                         return -rte_errno;
12193                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12194                                 break;
12195                         }
12196                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12197                         /*
12198                          * Only one FLAG or MARK action is supported per device
12199                          * flow right now, so the pointer to the tag resource
12200                          * must be zero before the registration process.
12201                          */
12202                         MLX5_ASSERT(!handle->dvh.rix_tag);
12203                         if (flow_dv_tag_resource_register(dev, tag_be,
12204                                                           dev_flow, error))
12205                                 return -rte_errno;
12206                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12207                         dev_flow->dv.actions[actions_n++] =
12208                                         dev_flow->dv.tag_resource->action;
12209                         break;
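                /*
                 * The split above, restated as a sketch: with extended
                 * metadata (dv_xmeta_en != legacy) FLAG becomes a
                 * modify-header write of MLX5_FLOW_MARK_DEFAULT, while in
                 * legacy mode it is a dedicated tag resource. On the
                 * application side it is simply:
                 *
                 *   struct rte_flow_action_queue q = { .index = 0 };
                 *   struct rte_flow_action acts[] = {
                 *           { .type = RTE_FLOW_ACTION_TYPE_FLAG },
                 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
                 *           { .type = RTE_FLOW_ACTION_TYPE_END },
                 *   };
                 */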
12210                 case RTE_FLOW_ACTION_TYPE_MARK:
12211                         action_flags |= MLX5_FLOW_ACTION_MARK;
12212                         dev_flow->handle->mark = 1;
12213                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12214                                 const struct rte_flow_action_mark *mark =
12215                                         (const struct rte_flow_action_mark *)
12216                                                 actions->conf;
12217
12218                                 if (flow_dv_convert_action_mark(dev, mark,
12219                                                                 mhdr_res,
12220                                                                 error))
12221                                         return -rte_errno;
12222                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12223                                 break;
12224                         }
12225                         /* Fall-through */
12226                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12227                         /* Legacy (non-extended) MARK action. */
12228                         tag_be = mlx5_flow_mark_set
12229                               (((const struct rte_flow_action_mark *)
12230                                (actions->conf))->id);
12231                         MLX5_ASSERT(!handle->dvh.rix_tag);
12232                         if (flow_dv_tag_resource_register(dev, tag_be,
12233                                                           dev_flow, error))
12234                                 return -rte_errno;
12235                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12236                         dev_flow->dv.actions[actions_n++] =
12237                                         dev_flow->dv.tag_resource->action;
12238                         break;
12239                 case RTE_FLOW_ACTION_TYPE_SET_META:
12240                         if (flow_dv_convert_action_set_meta
12241                                 (dev, mhdr_res, attr,
12242                                  (const struct rte_flow_action_set_meta *)
12243                                   actions->conf, error))
12244                                 return -rte_errno;
12245                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12246                         break;
12247                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12248                         if (flow_dv_convert_action_set_tag
12249                                 (dev, mhdr_res,
12250                                  (const struct rte_flow_action_set_tag *)
12251                                   actions->conf, error))
12252                                 return -rte_errno;
12253                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12254                         break;
12255                 case RTE_FLOW_ACTION_TYPE_DROP:
12256                         action_flags |= MLX5_FLOW_ACTION_DROP;
12257                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12258                         break;
12259                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12260                         queue = actions->conf;
12261                         rss_desc->queue_num = 1;
12262                         rss_desc->queue[0] = queue->index;
12263                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12264                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12265                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12266                         num_of_dest++;
12267                         break;
12268                 case RTE_FLOW_ACTION_TYPE_RSS:
12269                         rss = actions->conf;
12270                         memcpy(rss_desc->queue, rss->queue,
12271                                rss->queue_num * sizeof(uint16_t));
12272                         rss_desc->queue_num = rss->queue_num;
12273                         /* NULL RSS key indicates default RSS key. */
12274                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12275                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12276                         /*
12277                          * rss->level and rss->types should be set in advance
12278                          * when expanding items for RSS.
12279                          */
12280                         action_flags |= MLX5_FLOW_ACTION_RSS;
12281                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12282                                 MLX5_FLOW_FATE_SHARED_RSS :
12283                                 MLX5_FLOW_FATE_QUEUE;
12284                         break;
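                /*
                 * Application-side shape of the RSS action consumed above
                 * (sketch; a NULL .key falls back to rss_hash_default_key
                 * exactly as the code shows):
                 *
                 *   uint16_t queues[2] = { 0, 1 };
                 *   struct rte_flow_action_rss rss = {
                 *           .types = ETH_RSS_IP,
                 *           .key = NULL,
                 *           .key_len = 0,
                 *           .queue = queues,
                 *           .queue_num = 2,
                 *   };
                 */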
12285                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12286                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12287                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12288                         __atomic_fetch_add(&age_act->refcnt, 1,
12289                                            __ATOMIC_RELAXED);
12290                         age_act_pos = actions_n++;
12291                         action_flags |= MLX5_FLOW_ACTION_AGE;
12292                         break;
12293                 case RTE_FLOW_ACTION_TYPE_AGE:
12294                         non_shared_age = action->conf;
12295                         age_act_pos = actions_n++;
12296                         action_flags |= MLX5_FLOW_ACTION_AGE;
12297                         break;
12298                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12299                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12300                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12301                                                              NULL);
12302                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12303                                            __ATOMIC_RELAXED);
12304                         /* Save the information first; it will be applied later. */
12305                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12306                         break;
12307                 case RTE_FLOW_ACTION_TYPE_COUNT:
12308                         if (!dev_conf->devx) {
12309                                 return rte_flow_error_set
12310                                               (error, ENOTSUP,
12311                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12312                                                NULL,
12313                                                "count action not supported");
12314                         }
12315                         /* Save the information first; it will be applied later. */
12316                         count = action->conf;
12317                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12318                         break;
12319                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12320                         dev_flow->dv.actions[actions_n++] =
12321                                                 priv->sh->pop_vlan_action;
12322                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12323                         break;
12324                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12325                         if (!(action_flags &
12326                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12327                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12328                         vlan.eth_proto = rte_be_to_cpu_16
12329                              ((((const struct rte_flow_action_of_push_vlan *)
12330                                                    actions->conf)->ethertype));
12331                         found_action = mlx5_flow_find_action
12332                                         (actions + 1,
12333                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12334                         if (found_action)
12335                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12336                         found_action = mlx5_flow_find_action
12337                                         (actions + 1,
12338                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12339                         if (found_action)
12340                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12341                         if (flow_dv_create_action_push_vlan
12342                                             (dev, attr, &vlan, dev_flow, error))
12343                                 return -rte_errno;
12344                         dev_flow->dv.actions[actions_n++] =
12345                                         dev_flow->dv.push_vlan_res->action;
12346                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12347                         break;
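                /*
                 * Expected action ordering for the look-ahead above
                 * (sketch): OF_PUSH_VLAN may be followed by
                 * OF_SET_VLAN_VID and/or OF_SET_VLAN_PCP, which are folded
                 * into the single push action:
                 *
                 *   OF_PUSH_VLAN(ethertype 0x8100) -> OF_SET_VLAN_VID(100)
                 *       -> OF_SET_VLAN_PCP(3) -> ... -> END
                 */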
12348                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12349                         /* The OF_PUSH_VLAN action has already handled this action. */
12350                         MLX5_ASSERT(action_flags &
12351                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12352                         break;
12353                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12354                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12355                                 break;
12356                         flow_dev_get_vlan_info_from_items(items, &vlan);
12357                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12358                         /* If there is no VLAN push, this is a modify header action. */
12359                         if (flow_dv_convert_action_modify_vlan_vid
12360                                                 (mhdr_res, actions, error))
12361                                 return -rte_errno;
12362                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12363                         break;
12364                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12365                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12366                         if (flow_dv_create_action_l2_encap(dev, actions,
12367                                                            dev_flow,
12368                                                            attr->transfer,
12369                                                            error))
12370                                 return -rte_errno;
12371                         dev_flow->dv.actions[actions_n++] =
12372                                         dev_flow->dv.encap_decap->action;
12373                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12374                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12375                                 sample_act->action_flags |=
12376                                                         MLX5_FLOW_ACTION_ENCAP;
12377                         break;
12378                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12379                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12380                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12381                                                            attr->transfer,
12382                                                            error))
12383                                 return -rte_errno;
12384                         dev_flow->dv.actions[actions_n++] =
12385                                         dev_flow->dv.encap_decap->action;
12386                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12387                         break;
12388                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12389                         /* Handle encap with preceding decap. */
12390                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12391                                 if (flow_dv_create_action_raw_encap
12392                                         (dev, actions, dev_flow, attr, error))
12393                                         return -rte_errno;
12394                                 dev_flow->dv.actions[actions_n++] =
12395                                         dev_flow->dv.encap_decap->action;
12396                         } else {
12397                                 /* Handle encap without preceding decap. */
12398                                 if (flow_dv_create_action_l2_encap
12399                                     (dev, actions, dev_flow, attr->transfer,
12400                                      error))
12401                                         return -rte_errno;
12402                                 dev_flow->dv.actions[actions_n++] =
12403                                         dev_flow->dv.encap_decap->action;
12404                         }
12405                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12406                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12407                                 sample_act->action_flags |=
12408                                                         MLX5_FLOW_ACTION_ENCAP;
12409                         break;
12410                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12411                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12412                                 ;
12413                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12414                                 if (flow_dv_create_action_l2_decap
12415                                     (dev, dev_flow, attr->transfer, error))
12416                                         return -rte_errno;
12417                                 dev_flow->dv.actions[actions_n++] =
12418                                         dev_flow->dv.encap_decap->action;
12419                         }
12420                         /* If decap is followed by encap, handle it at encap. */
12421                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12422                         break;
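                /*
                 * The VOID-skipping look-ahead above means a RAW_DECAP
                 * followed (possibly through VOIDs) by RAW_ENCAP is emitted
                 * as one header rewrite at the encap step (sketch):
                 *
                 *   RAW_DECAP(old headers) -> VOID -> RAW_ENCAP(new headers)
                 *
                 * Only a standalone RAW_DECAP creates an L2 decap action
                 * right here.
                 */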
12423                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12424                         dev_flow->dv.actions[actions_n++] =
12425                                 (void *)(uintptr_t)action->conf;
12426                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12427                         break;
12428                 case RTE_FLOW_ACTION_TYPE_JUMP:
12429                         jump_group = ((const struct rte_flow_action_jump *)
12430                                                         action->conf)->group;
12431                         grp_info.std_tbl_fix = 0;
12432                         if (dev_flow->skip_scale &
12433                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12434                                 grp_info.skip_scale = 1;
12435                         else
12436                                 grp_info.skip_scale = 0;
12437                         ret = mlx5_flow_group_to_table(dev, tunnel,
12438                                                        jump_group,
12439                                                        &table,
12440                                                        &grp_info, error);
12441                         if (ret)
12442                                 return ret;
12443                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12444                                                        attr->transfer,
12445                                                        !!dev_flow->external,
12446                                                        tunnel, jump_group, 0,
12447                                                        0, error);
12448                         if (!tbl)
12449                                 return rte_flow_error_set
12450                                                 (error, errno,
12451                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12452                                                  NULL,
12453                                                  "cannot create jump action.");
12454                         if (flow_dv_jump_tbl_resource_register
12455                             (dev, tbl, dev_flow, error)) {
12456                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12457                                 return rte_flow_error_set
12458                                                 (error, errno,
12459                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12460                                                  NULL,
12461                                                  "cannot create jump action.");
12462                         }
12463                         dev_flow->dv.actions[actions_n++] =
12464                                         dev_flow->dv.jump->action;
12465                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12466                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12467                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12468                         num_of_dest++;
12469                         break;
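                /*
                 * Application-side jump (sketch); the rte_flow group number
                 * is remapped to a HW table by mlx5_flow_group_to_table()
                 * above, honoring the tunnel-offload and scaling rules:
                 *
                 *   struct rte_flow_action_jump jump = { .group = 3 };
                 */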
12470                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12471                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12472                         if (flow_dv_convert_action_modify_mac
12473                                         (mhdr_res, actions, error))
12474                                 return -rte_errno;
12475                         action_flags |= actions->type ==
12476                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12477                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12478                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12479                         break;
12480                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12481                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12482                         if (flow_dv_convert_action_modify_ipv4
12483                                         (mhdr_res, actions, error))
12484                                 return -rte_errno;
12485                         action_flags |= actions->type ==
12486                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12487                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12488                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12489                         break;
12490                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12491                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12492                         if (flow_dv_convert_action_modify_ipv6
12493                                         (mhdr_res, actions, error))
12494                                 return -rte_errno;
12495                         action_flags |= actions->type ==
12496                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12497                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12498                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12499                         break;
12500                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12501                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12502                         if (flow_dv_convert_action_modify_tp
12503                                         (mhdr_res, actions, items,
12504                                          &flow_attr, dev_flow, !!(action_flags &
12505                                          MLX5_FLOW_ACTION_DECAP), error))
12506                                 return -rte_errno;
12507                         action_flags |= actions->type ==
12508                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12509                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12510                                         MLX5_FLOW_ACTION_SET_TP_DST;
12511                         break;
12512                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12513                         if (flow_dv_convert_action_modify_dec_ttl
12514                                         (mhdr_res, items, &flow_attr, dev_flow,
12515                                          !!(action_flags &
12516                                          MLX5_FLOW_ACTION_DECAP), error))
12517                                 return -rte_errno;
12518                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12519                         break;
12520                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12521                         if (flow_dv_convert_action_modify_ttl
12522                                         (mhdr_res, actions, items, &flow_attr,
12523                                          dev_flow, !!(action_flags &
12524                                          MLX5_FLOW_ACTION_DECAP), error))
12525                                 return -rte_errno;
12526                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12527                         break;
12528                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12529                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12530                         if (flow_dv_convert_action_modify_tcp_seq
12531                                         (mhdr_res, actions, error))
12532                                 return -rte_errno;
12533                         action_flags |= actions->type ==
12534                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12535                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12536                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12537                         break;
12538
12539                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12540                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12541                         if (flow_dv_convert_action_modify_tcp_ack
12542                                         (mhdr_res, actions, error))
12543                                 return -rte_errno;
12544                         action_flags |= actions->type ==
12545                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12546                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12547                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12548                         break;
12549                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12550                         if (flow_dv_convert_action_set_reg
12551                                         (mhdr_res, actions, error))
12552                                 return -rte_errno;
12553                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12554                         break;
12555                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12556                         if (flow_dv_convert_action_copy_mreg
12557                                         (dev, mhdr_res, actions, error))
12558                                 return -rte_errno;
12559                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12560                         break;
12561                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12562                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12563                         dev_flow->handle->fate_action =
12564                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12565                         break;
12566                 case RTE_FLOW_ACTION_TYPE_METER:
12567                         if (!wks->fm)
12568                                 return rte_flow_error_set(error, rte_errno,
12569                                         RTE_FLOW_ERROR_TYPE_ACTION,
12570                                         NULL, "Failed to get meter in flow.");
12571                         /* Set the meter action. */
12572                         dev_flow->dv.actions[actions_n++] =
12573                                 wks->fm->meter_action;
12574                         action_flags |= MLX5_FLOW_ACTION_METER;
12575                         break;
12576                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12577                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12578                                                               actions, error))
12579                                 return -rte_errno;
12580                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12581                         break;
12582                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12583                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
12584                                                               actions, error))
12585                                 return -rte_errno;
12586                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
12587                         break;
12588                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
12589                         sample_act_pos = actions_n;
12590                         sample = (const struct rte_flow_action_sample *)
12591                                  action->conf;
12592                         actions_n++;
12593                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
12594                         /* Put the encap action into the group when working with a port ID. */
12595                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
12596                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
12597                                 sample_act->action_flags |=
12598                                                         MLX5_FLOW_ACTION_ENCAP;
12599                         break;
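                /*
                 * SAMPLE only reserves its action slot here; the sample
                 * resource itself is created after the loop, once all
                 * sub-actions are collected. Application-side shape
                 * (sketch, 1-in-2 sampling; "sample_sub_actions" is a
                 * placeholder END-terminated list):
                 *
                 *   struct rte_flow_action_sample smp = {
                 *           .ratio = 2,
                 *           .actions = sample_sub_actions,
                 *   };
                 */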
12600                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
12601                         if (flow_dv_convert_action_modify_field
12602                                         (dev, mhdr_res, actions, attr, error))
12603                                 return -rte_errno;
12604                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
12605                         break;
12606                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12607                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12608                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
12609                         if (!ct)
12610                                 return rte_flow_error_set(error, EINVAL,
12611                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12612                                                 NULL,
12613                                                 "Failed to get CT object.");
12614                         if (mlx5_aso_ct_available(priv->sh, ct))
12615                                 return rte_flow_error_set(error, rte_errno,
12616                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12617                                                 NULL,
12618                                                 "CT is unavailable.");
12619                         if (ct->is_original)
12620                                 dev_flow->dv.actions[actions_n] =
12621                                                         ct->dr_action_orig;
12622                         else
12623                                 dev_flow->dv.actions[actions_n] =
12624                                                         ct->dr_action_rply;
12625                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
12626                         flow->ct = owner_idx;
12627                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
12628                         actions_n++;
12629                         action_flags |= MLX5_FLOW_ACTION_CT;
12630                         break;
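                /*
                 * Note on the cast above: for indirect CT actions the
                 * action->conf field does not hold a real pointer but the
                 * uintptr_t-encoded owner index assigned at creation time;
                 * one ASO object then serves both directions:
                 *
                 *   act = ct->is_original ? ct->dr_action_orig
                 *                         : ct->dr_action_rply;
                 */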
12631                 case RTE_FLOW_ACTION_TYPE_END:
12632                         actions_end = true;
12633                         if (mhdr_res->actions_num) {
12634                                 /* Create the modify header action if needed. */
12635                                 if (flow_dv_modify_hdr_resource_register
12636                                         (dev, mhdr_res, dev_flow, error))
12637                                         return -rte_errno;
12638                                 dev_flow->dv.actions[modify_action_position] =
12639                                         handle->dvh.modify_hdr->action;
12640                         }
12641                         /*
12642                          * Handle the AGE and COUNT actions with a single HW counter
12643                          * when they are not shared.
12644                          */
12645                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
12646                                 if ((non_shared_age &&
12647                                      count && !count->shared) ||
12648                                     !(priv->sh->flow_hit_aso_en &&
12649                                       (attr->group || attr->transfer))) {
12650                                         /* Create aging by counters. */
12651                                         cnt_act = flow_dv_prepare_counter
12652                                                                 (dev, dev_flow,
12653                                                                  flow, count,
12654                                                                  non_shared_age,
12655                                                                  error);
12656                                         if (!cnt_act)
12657                                                 return -rte_errno;
12658                                         dev_flow->dv.actions[age_act_pos] =
12659                                                                 cnt_act->action;
12660                                         break;
12661                                 }
12662                                 if (!flow->age && non_shared_age) {
12663                                         flow->age = flow_dv_aso_age_alloc
12664                                                                 (dev, error);
12665                                         if (!flow->age)
12666                                                 return -rte_errno;
12667                                         flow_dv_aso_age_params_init
12668                                                     (dev, flow->age,
12669                                                      non_shared_age->context ?
12670                                                      non_shared_age->context :
12671                                                      (void *)(uintptr_t)
12672                                                      (dev_flow->flow_idx),
12673                                                      non_shared_age->timeout);
12674                                 }
12675                                 age_act = flow_aso_age_get_by_idx(dev,
12676                                                                   flow->age);
12677                                 dev_flow->dv.actions[age_act_pos] =
12678                                                              age_act->dr_action;
12679                         }
12680                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
12681                                 /*
12682                                  * Create one count action, to be used
12683                                  * by all sub-flows.
12684                                  */
12685                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
12686                                                                   flow, count,
12687                                                                   NULL, error);
12688                                 if (!cnt_act)
12689                                         return -rte_errno;
12690                                 dev_flow->dv.actions[actions_n++] =
12691                                                                 cnt_act->action;
12692                         }
12693                 default:
12694                         break;
12695                 }
12696                 if (mhdr_res->actions_num &&
12697                     modify_action_position == UINT32_MAX)
12698                         modify_action_position = actions_n++;
12699         }
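        /*
         * Placement of the merged modify-header action, sketched: its slot
         * is reserved at the first header-modifying action and back-filled
         * at RTE_FLOW_ACTION_TYPE_END once all conversions are merged:
         *
         *   reserve:  modify_action_position = actions_n++;
         *   fill-in:  dev_flow->dv.actions[modify_action_position] =
         *                     handle->dvh.modify_hdr->action;
         */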
12700         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
12701                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
12702                 int item_type = items->type;
12703
12704                 if (!mlx5_flow_os_item_supported(item_type))
12705                         return rte_flow_error_set(error, ENOTSUP,
12706                                                   RTE_FLOW_ERROR_TYPE_ITEM,
12707                                                   NULL, "item not supported");
12708                 switch (item_type) {
12709                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
12710                         flow_dv_translate_item_port_id
12711                                 (dev, match_mask, match_value, items, attr);
12712                         last_item = MLX5_FLOW_ITEM_PORT_ID;
12713                         break;
12714                 case RTE_FLOW_ITEM_TYPE_ETH:
12715                         flow_dv_translate_item_eth(match_mask, match_value,
12716                                                    items, tunnel,
12717                                                    dev_flow->dv.group);
12718                         matcher.priority = action_flags &
12719                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
12720                                         !dev_flow->external ?
12721                                         MLX5_PRIORITY_MAP_L3 :
12722                                         MLX5_PRIORITY_MAP_L2;
12723                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
12724                                              MLX5_FLOW_LAYER_OUTER_L2;
12725                         break;
12726                 case RTE_FLOW_ITEM_TYPE_VLAN:
12727                         flow_dv_translate_item_vlan(dev_flow,
12728                                                     match_mask, match_value,
12729                                                     items, tunnel,
12730                                                     dev_flow->dv.group);
12731                         matcher.priority = MLX5_PRIORITY_MAP_L2;
12732                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
12733                                               MLX5_FLOW_LAYER_INNER_VLAN) :
12734                                              (MLX5_FLOW_LAYER_OUTER_L2 |
12735                                               MLX5_FLOW_LAYER_OUTER_VLAN);
12736                         break;
12737                 case RTE_FLOW_ITEM_TYPE_IPV4:
12738                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12739                                                   &item_flags, &tunnel);
12740                         flow_dv_translate_item_ipv4(match_mask, match_value,
12741                                                     items, tunnel,
12742                                                     dev_flow->dv.group);
12743                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12744                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
12745                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
12746                         if (items->mask != NULL &&
12747                             ((const struct rte_flow_item_ipv4 *)
12748                              items->mask)->hdr.next_proto_id) {
12749                                 next_protocol =
12750                                         ((const struct rte_flow_item_ipv4 *)
12751                                          (items->spec))->hdr.next_proto_id;
12752                                 next_protocol &=
12753                                         ((const struct rte_flow_item_ipv4 *)
12754                                          (items->mask))->hdr.next_proto_id;
12755                         } else {
12756                                 /* Reset for inner layer. */
12757                                 next_protocol = 0xff;
12758                         }
12759                         break;
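                /*
                 * next_protocol tracking above, sketched: the effective
                 * value is spec & mask, so an unmasked field leaves 0xff
                 * ("unknown") for the inner-layer checks; the same pattern
                 * repeats for IPv6 and the IPv6 fragment extension below:
                 *
                 *   next_protocol = spec->hdr.next_proto_id &
                 *                   mask->hdr.next_proto_id;
                 */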
12760                 case RTE_FLOW_ITEM_TYPE_IPV6:
12761                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12762                                                   &item_flags, &tunnel);
12763                         flow_dv_translate_item_ipv6(match_mask, match_value,
12764                                                     items, tunnel,
12765                                                     dev_flow->dv.group);
12766                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12767                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
12768                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
12769                         if (items->mask != NULL &&
12770                             ((const struct rte_flow_item_ipv6 *)
12771                              items->mask)->hdr.proto) {
12772                                 next_protocol =
12773                                         ((const struct rte_flow_item_ipv6 *)
12774                                          items->spec)->hdr.proto;
12775                                 next_protocol &=
12776                                         ((const struct rte_flow_item_ipv6 *)
12777                                          items->mask)->hdr.proto;
12778                         } else {
12779                                 /* Reset for inner layer. */
12780                                 next_protocol = 0xff;
12781                         }
12782                         break;
12783                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
12784                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
12785                                                              match_value,
12786                                                              items, tunnel);
12787                         last_item = tunnel ?
12788                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
12789                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
12790                         if (items->mask != NULL &&
12791                             ((const struct rte_flow_item_ipv6_frag_ext *)
12792                              items->mask)->hdr.next_header) {
12793                                 next_protocol =
12794                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12795                                  items->spec)->hdr.next_header;
12796                                 next_protocol &=
12797                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12798                                  items->mask)->hdr.next_header;
12799                         } else {
12800                                 /* Reset for inner layer. */
12801                                 next_protocol = 0xff;
12802                         }
12803                         break;
12804                 case RTE_FLOW_ITEM_TYPE_TCP:
12805                         flow_dv_translate_item_tcp(match_mask, match_value,
12806                                                    items, tunnel);
12807                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12808                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
12809                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
12810                         break;
12811                 case RTE_FLOW_ITEM_TYPE_UDP:
12812                         flow_dv_translate_item_udp(match_mask, match_value,
12813                                                    items, tunnel);
12814                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12815                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
12816                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
12817                         break;
12818                 case RTE_FLOW_ITEM_TYPE_GRE:
12819                         flow_dv_translate_item_gre(match_mask, match_value,
12820                                                    items, tunnel);
12821                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12822                         last_item = MLX5_FLOW_LAYER_GRE;
12823                         break;
12824                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
12825                         flow_dv_translate_item_gre_key(match_mask,
12826                                                        match_value, items);
12827                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
12828                         break;
12829                 case RTE_FLOW_ITEM_TYPE_NVGRE:
12830                         flow_dv_translate_item_nvgre(match_mask, match_value,
12831                                                      items, tunnel);
12832                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12833                         last_item = MLX5_FLOW_LAYER_GRE;
12834                         break;
12835                 case RTE_FLOW_ITEM_TYPE_VXLAN:
12836                         flow_dv_translate_item_vxlan(match_mask, match_value,
12837                                                      items, tunnel);
12838                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12839                         last_item = MLX5_FLOW_LAYER_VXLAN;
12840                         break;
12841                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
12842                         flow_dv_translate_item_vxlan_gpe(match_mask,
12843                                                          match_value, items,
12844                                                          tunnel);
12845                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12846                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
12847                         break;
12848                 case RTE_FLOW_ITEM_TYPE_GENEVE:
12849                         flow_dv_translate_item_geneve(match_mask, match_value,
12850                                                       items, tunnel);
12851                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12852                         last_item = MLX5_FLOW_LAYER_GENEVE;
12853                         break;
12854                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
12855                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
12856                                                           match_value,
12857                                                           items, error);
12858                         if (ret)
12859                                 return rte_flow_error_set(error, -ret,
12860                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12861                                         "cannot create GENEVE TLV option");
12862                         flow->geneve_tlv_option = 1;
12863                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
12864                         break;
12865                 case RTE_FLOW_ITEM_TYPE_MPLS:
12866                         flow_dv_translate_item_mpls(match_mask, match_value,
12867                                                     items, last_item, tunnel);
12868                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12869                         last_item = MLX5_FLOW_LAYER_MPLS;
12870                         break;
12871                 case RTE_FLOW_ITEM_TYPE_MARK:
12872                         flow_dv_translate_item_mark(dev, match_mask,
12873                                                     match_value, items);
12874                         last_item = MLX5_FLOW_ITEM_MARK;
12875                         break;
12876                 case RTE_FLOW_ITEM_TYPE_META:
12877                         flow_dv_translate_item_meta(dev, match_mask,
12878                                                     match_value, attr, items);
12879                         last_item = MLX5_FLOW_ITEM_METADATA;
12880                         break;
12881                 case RTE_FLOW_ITEM_TYPE_ICMP:
12882                         flow_dv_translate_item_icmp(match_mask, match_value,
12883                                                     items, tunnel);
12884                         last_item = MLX5_FLOW_LAYER_ICMP;
12885                         break;
12886                 case RTE_FLOW_ITEM_TYPE_ICMP6:
12887                         flow_dv_translate_item_icmp6(match_mask, match_value,
12888                                                       items, tunnel);
12889                         last_item = MLX5_FLOW_LAYER_ICMP6;
12890                         break;
12891                 case RTE_FLOW_ITEM_TYPE_TAG:
12892                         flow_dv_translate_item_tag(dev, match_mask,
12893                                                    match_value, items);
12894                         last_item = MLX5_FLOW_ITEM_TAG;
12895                         break;
12896                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
12897                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
12898                                                         match_value, items);
12899                         last_item = MLX5_FLOW_ITEM_TAG;
12900                         break;
12901                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
12902                         flow_dv_translate_item_tx_queue(dev, match_mask,
12903                                                         match_value,
12904                                                         items);
12905                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
12906                         break;
12907                 case RTE_FLOW_ITEM_TYPE_GTP:
12908                         flow_dv_translate_item_gtp(match_mask, match_value,
12909                                                    items, tunnel);
12910                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12911                         last_item = MLX5_FLOW_LAYER_GTP;
12912                         break;
12913                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
12914                         ret = flow_dv_translate_item_gtp_psc(match_mask,
12915                                                           match_value,
12916                                                           items);
12917                         if (ret)
12918                                 return rte_flow_error_set(error, -ret,
12919                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12920                                         "cannot create GTP PSC item");
12921                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
12922                         break;
12923                 case RTE_FLOW_ITEM_TYPE_ECPRI:
12924                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
12925                                 /* Create it only the first time it is used. */
12926                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12927                                 if (ret)
12928                                         return rte_flow_error_set
12929                                                 (error, -ret,
12930                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12931                                                 NULL,
12932                                                 "cannot create eCPRI parser");
12933                         }
12934                         /* Adjust the matcher mask size and the device flow value size. */
12935                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12936                         dev_flow->dv.value.size =
12937                                         MLX5_ST_SZ_BYTES(fte_match_param);
12938                         flow_dv_translate_item_ecpri(dev, match_mask,
12939                                                      match_value, items);
12940                         /* No other protocol should follow eCPRI layer. */
12941                         last_item = MLX5_FLOW_LAYER_ECPRI;
12942                         break;
12943                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
12944                         flow_dv_translate_item_integrity(match_mask,
12945                                                          match_value,
12946                                                          head_item, items);
12947                         break;
12948                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
12949                         flow_dv_translate_item_aso_ct(dev, match_mask,
12950                                                       match_value, items);
12951                         break;
12952                 default:
12953                         break;
12954                 }
12955                 item_flags |= last_item;
12956         }
12957         /*
12958          * When E-Switch mode is enabled, there are two cases where the
12959          * source port must be set manually.
12960          * The first is a NIC steering rule; the second is an E-Switch
12961          * rule in which no port_id item was found. In both cases the
12962          * source port is set according to the current port in use.
12963          */
12964         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
12965             (priv->representor || priv->master)) {
12966                 if (flow_dv_translate_item_port_id(dev, match_mask,
12967                                                    match_value, NULL, attr))
12968                         return -rte_errno;
12969         }
12970 #ifdef RTE_LIBRTE_MLX5_DEBUG
12971         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
12972                                               dev_flow->dv.value.buf));
12973 #endif
12974         /*
12975          * Layers may be already initialized from prefix flow if this dev_flow
12976          * is the suffix flow.
12977          */
12978         handle->layers |= item_flags;
12979         if (action_flags & MLX5_FLOW_ACTION_RSS)
12980                 flow_dv_hashfields_set(dev_flow, rss_desc);
12981         /* If the sample action contains an RSS action, the sample/mirror
12982          * resource should be registered after the hash fields are updated.
12983          */
12984         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
12985                 ret = flow_dv_translate_action_sample(dev,
12986                                                       sample,
12987                                                       dev_flow, attr,
12988                                                       &num_of_dest,
12989                                                       sample_actions,
12990                                                       &sample_res,
12991                                                       error);
12992                 if (ret < 0)
12993                         return ret;
12994                 ret = flow_dv_create_action_sample(dev,
12995                                                    dev_flow,
12996                                                    num_of_dest,
12997                                                    &sample_res,
12998                                                    &mdest_res,
12999                                                    sample_actions,
13000                                                    action_flags,
13001                                                    error);
13002                 if (ret < 0)
13003                         return rte_flow_error_set
13004                                                 (error, rte_errno,
13005                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13006                                                 NULL,
13007                                                 "cannot create sample action");
13008                 if (num_of_dest > 1) {
13009                         dev_flow->dv.actions[sample_act_pos] =
13010                         dev_flow->dv.dest_array_res->action;
13011                 } else {
13012                         dev_flow->dv.actions[sample_act_pos] =
13013                         dev_flow->dv.sample_res->verbs_action;
13014                 }
13015         }
13016         /*
13017          * For multiple destinations (sample action with ratio=1), the encap
13018          * action and the port ID action are combined into a group action,
13019          * so the original actions must be removed from the flow and the
13020          * sample action used instead.
13021          */
13022         if (num_of_dest > 1 &&
13023             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13024                 int i;
13025                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13026
13027                 for (i = 0; i < actions_n; i++) {
13028                         if ((sample_act->dr_encap_action &&
13029                                 sample_act->dr_encap_action ==
13030                                 dev_flow->dv.actions[i]) ||
13031                                 (sample_act->dr_port_id_action &&
13032                                 sample_act->dr_port_id_action ==
13033                                 dev_flow->dv.actions[i]) ||
13034                                 (sample_act->dr_jump_action &&
13035                                 sample_act->dr_jump_action ==
13036                                 dev_flow->dv.actions[i]))
13037                                 continue;
13038                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13039                 }
13040                 memcpy((void *)dev_flow->dv.actions,
13041                                 (void *)temp_actions,
13042                                 tmp_actions_n * sizeof(void *));
13043                 actions_n = tmp_actions_n;
13044         }
13045         dev_flow->dv.actions_n = actions_n;
13046         dev_flow->act_flags = action_flags;
13047         if (wks->skip_matcher_reg)
13048                 return 0;
13049         /* Register matcher. */
13050         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13051                                     matcher.mask.size);
13052         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13053                                         matcher.priority);
13054         /* The reserved field does not need to be set to 0 here. */
13055         tbl_key.is_fdb = attr->transfer;
13056         tbl_key.is_egress = attr->egress;
13057         tbl_key.level = dev_flow->dv.group;
13058         tbl_key.id = dev_flow->dv.table_id;
13059         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13060                                      tunnel, attr->group, error))
13061                 return -rte_errno;
13062         return 0;
13063 }
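
/*
 * Illustrative sketch, not part of the driver: an application-level rule
 * using a sample action with ratio == 1 (full mirroring) plus a regular
 * fate action. This is the multiple-destination case handled above, where
 * the port_id/jump/encap actions are folded into the destination array
 * and only the sample action remains in the flow. All names local to this
 * sketch (example_mirror_flow, mirror_port, fwd_port) are hypothetical;
 * only the public rte_flow API is assumed.
 */
static struct rte_flow *
example_mirror_flow(uint16_t port_id, uint16_t mirror_port, uint16_t fwd_port,
                    struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .group = 1, .transfer = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_port_id mirror = { .id = mirror_port };
        struct rte_flow_action sample_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &mirror },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        /* ratio == 1 samples every packet, i.e. pure mirroring. */
        struct rte_flow_action_sample sample = {
                .ratio = 1,
                .actions = sample_acts,
        };
        struct rte_flow_action_port_id fwd = { .id = fwd_port };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
                { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &fwd },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
}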
13064
13065 /**
13066  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13067  *
13068  * @param[in, out] action
13069  *   Shared RSS action holding hash RX queue objects.
13070  * @param[in] hash_fields
13071  *   Defines combination of packet fields to participate in RX hash.
13072  * @param[in] hrxq_idx
13073  *   Hash RX queue index to set.
13074  *
13075  * @return
13076  *   0 on success, otherwise negative errno value.
13077  */
13081 static int
13082 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13083                               const uint64_t hash_fields,
13084                               uint32_t hrxq_idx)
13085 {
13086         uint32_t *hrxqs = action->hrxq;
13087
13088         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13089         case MLX5_RSS_HASH_IPV4:
13090                 /* fall-through. */
13091         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13092                 /* fall-through. */
13093         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13094                 hrxqs[0] = hrxq_idx;
13095                 return 0;
13096         case MLX5_RSS_HASH_IPV4_TCP:
13097                 /* fall-through. */
13098         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13099                 /* fall-through. */
13100         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13101                 hrxqs[1] = hrxq_idx;
13102                 return 0;
13103         case MLX5_RSS_HASH_IPV4_UDP:
13104                 /* fall-through. */
13105         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13106                 /* fall-through. */
13107         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13108                 hrxqs[2] = hrxq_idx;
13109                 return 0;
13110         case MLX5_RSS_HASH_IPV6:
13111                 /* fall-through. */
13112         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13113                 /* fall-through. */
13114         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13115                 hrxqs[3] = hrxq_idx;
13116                 return 0;
13117         case MLX5_RSS_HASH_IPV6_TCP:
13118                 /* fall-through. */
13119         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13120                 /* fall-through. */
13121         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13122                 hrxqs[4] = hrxq_idx;
13123                 return 0;
13124         case MLX5_RSS_HASH_IPV6_UDP:
13125                 /* fall-through. */
13126         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13127                 /* fall-through. */
13128         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13129                 hrxqs[5] = hrxq_idx;
13130                 return 0;
13131         case MLX5_RSS_HASH_NONE:
13132                 hrxqs[6] = hrxq_idx;
13133                 return 0;
13134         default:
13135                 return -1;
13136         }
13137 }
13138
13139 /**
13140  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13141  *
13142  * @param[in] dev
13143  *   Pointer to the Ethernet device structure.
13144  * @param[in] idx
13145  *   Shared RSS action ID holding hash RX queue objects.
13146  * @param[in] hash_fields
13147  *   Defines combination of packet fields to participate in RX hash.
13148  *
13149  * @return
13150  *   Valid hash RX queue index, otherwise 0.
13151  */
13155 static uint32_t
13156 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13157                                  const uint64_t hash_fields)
13158 {
13159         struct mlx5_priv *priv = dev->data->dev_private;
13160         struct mlx5_shared_action_rss *shared_rss =
13161             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13162         const uint32_t *hrxqs = shared_rss->hrxq;
13163
13164         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13165         case MLX5_RSS_HASH_IPV4:
13166                 /* fall-through. */
13167         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13168                 /* fall-through. */
13169         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13170                 return hrxqs[0];
13171         case MLX5_RSS_HASH_IPV4_TCP:
13172                 /* fall-through. */
13173         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13174                 /* fall-through. */
13175         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13176                 return hrxqs[1];
13177         case MLX5_RSS_HASH_IPV4_UDP:
13178                 /* fall-through. */
13179         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13180                 /* fall-through. */
13181         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13182                 return hrxqs[2];
13183         case MLX5_RSS_HASH_IPV6:
13184                 /* fall-through. */
13185         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13186                 /* fall-through. */
13187         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13188                 return hrxqs[3];
13189         case MLX5_RSS_HASH_IPV6_TCP:
13190                 /* fall-through. */
13191         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13192                 /* fall-through. */
13193         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13194                 return hrxqs[4];
13195         case MLX5_RSS_HASH_IPV6_UDP:
13196                 /* fall-through. */
13197         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13198                 /* fall-through. */
13199         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13200                 return hrxqs[5];
13201         case MLX5_RSS_HASH_NONE:
13202                 return hrxqs[6];
13203         default:
13204                 return 0;
13205         }
13207 }
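
/*
 * Minimal usage sketch (hypothetical, not built with the driver): the
 * set/lookup helpers above implement a fixed-slot map keyed by the L3/L4
 * hash-field combination, so the mutually exclusive variants of one
 * L3+L4 pair all land in the same slot.
 */
static void
example_hrxq_slot_sharing(struct mlx5_shared_action_rss *shared_rss,
                          uint32_t hrxq_idx)
{
        /* IPv4/TCP and its SRC-/DST-only variants all select slot 1. */
        __flow_dv_action_rss_hrxq_set(shared_rss, MLX5_RSS_HASH_IPV4_TCP,
                                      hrxq_idx);
        MLX5_ASSERT(shared_rss->hrxq[1] == hrxq_idx);
}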
13208
13209 /**
13210  * Apply the flow to the NIC. Lock free
13211  * (the mutex should be acquired by the caller).
13212  *
13213  * @param[in] dev
13214  *   Pointer to the Ethernet device structure.
13215  * @param[in, out] flow
13216  *   Pointer to flow structure.
13217  * @param[out] error
13218  *   Pointer to error structure.
13219  *
13220  * @return
13221  *   0 on success, a negative errno value otherwise and rte_errno is set.
13222  */
13223 static int
13224 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13225               struct rte_flow_error *error)
13226 {
13227         struct mlx5_flow_dv_workspace *dv;
13228         struct mlx5_flow_handle *dh;
13229         struct mlx5_flow_handle_dv *dv_h;
13230         struct mlx5_flow *dev_flow;
13231         struct mlx5_priv *priv = dev->data->dev_private;
13232         uint32_t handle_idx;
13233         int n;
13234         int err;
13235         int idx;
13236         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13237         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13238
13239         MLX5_ASSERT(wks);
13240         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13241                 dev_flow = &wks->flows[idx];
13242                 dv = &dev_flow->dv;
13243                 dh = dev_flow->handle;
13244                 dv_h = &dh->dvh;
13245                 n = dv->actions_n;
13246                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13247                         if (dv->transfer) {
13248                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13249                                 dv->actions[n++] = priv->sh->dr_drop_action;
13250                         } else {
13251 #ifdef HAVE_MLX5DV_DR
13252                                 /* DR supports drop action placeholder. */
13253                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13254                                 dv->actions[n++] = priv->sh->dr_drop_action;
13255 #else
13256                                 /* For DV we use the explicit drop queue. */
13257                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13258                                 dv->actions[n++] =
13259                                                 priv->drop_queue.hrxq->action;
13260 #endif
13261                         }
13262                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13263                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13264                         struct mlx5_hrxq *hrxq;
13265                         uint32_t hrxq_idx;
13266
13267                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13268                                                     &hrxq_idx);
13269                         if (!hrxq) {
13270                                 rte_flow_error_set
13271                                         (error, rte_errno,
13272                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13273                                          "cannot get hash queue");
13274                                 goto error;
13275                         }
13276                         dh->rix_hrxq = hrxq_idx;
13277                         dv->actions[n++] = hrxq->action;
13278                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13279                         struct mlx5_hrxq *hrxq = NULL;
13280                         uint32_t hrxq_idx;
13281
13282                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13283                                                 rss_desc->shared_rss,
13284                                                 dev_flow->hash_fields);
13285                         if (hrxq_idx)
13286                                 hrxq = mlx5_ipool_get
13287                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13288                                          hrxq_idx);
13289                         if (!hrxq) {
13290                                 rte_flow_error_set
13291                                         (error, rte_errno,
13292                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13293                                          "cannot get hash queue");
13294                                 goto error;
13295                         }
13296                         dh->rix_srss = rss_desc->shared_rss;
13297                         dv->actions[n++] = hrxq->action;
13298                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13299                         if (!priv->sh->default_miss_action) {
13300                                 rte_flow_error_set
13301                                         (error, rte_errno,
13302                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13303                                          "default miss action was not created");
13304                                 goto error;
13305                         }
13306                         dv->actions[n++] = priv->sh->default_miss_action;
13307                 }
13308                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13309                                                (void *)&dv->value, n,
13310                                                dv->actions, &dh->drv_flow);
13311                 if (err) {
13312                         rte_flow_error_set(error, errno,
13313                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13314                                            NULL,
13315                                            "hardware refuses to create flow");
13316                         goto error;
13317                 }
13318                 if (priv->vmwa_context &&
13319                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13320                         /*
13321                          * The rule contains the VLAN pattern.
13322                          * For a VF we are going to create a VLAN
13323                          * interface to make the hypervisor set the
13324                          * correct e-Switch vport context.
13325                          */
13326                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13327                 }
13328         }
13329         return 0;
13330 error:
13331         err = rte_errno; /* Save rte_errno before cleanup. */
13332         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13333                        handle_idx, dh, next) {
13334                 /* hrxq is a union; don't clear it if the flag is not set. */
13335                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13336                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13337                         dh->rix_hrxq = 0;
13338                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13339                         dh->rix_srss = 0;
13340                 }
13341                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13342                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13343         }
13344         rte_errno = err; /* Restore rte_errno. */
13345         return -rte_errno;
13346 }
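
/*
 * The error path above follows a common DPDK idiom: rte_errno is saved
 * before the release calls, which may fail and overwrite it, and is
 * restored afterwards so the caller sees the original failure code.
 * A minimal sketch of the idiom (hypothetical helper, illustration only):
 */
static int
example_errno_preserving_cleanup(struct rte_eth_dev *dev, uint32_t hrxq_idx)
{
        int err = rte_errno; /* Save rte_errno before cleanup. */

        mlx5_hrxq_release(dev, hrxq_idx); /* May clobber rte_errno. */
        rte_errno = err; /* Restore the original failure code. */
        return -rte_errno;
}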
13347
13348 void
13349 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
13350                           struct mlx5_cache_entry *entry)
13351 {
13352         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
13353                                                           entry);
13354
13355         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
13356         mlx5_free(cache);
13357 }
13358
13359 /**
13360  * Release the flow matcher.
13361  *
13362  * @param dev
13363  *   Pointer to Ethernet device.
13364  * @param handle
13365  *   Pointer to mlx5_flow_handle.
13366  *
13367  * @return
13368  *   1 while a reference on it exists, 0 when freed.
13369  */
13370 static int
13371 flow_dv_matcher_release(struct rte_eth_dev *dev,
13372                         struct mlx5_flow_handle *handle)
13373 {
13374         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13375         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13376                                                             typeof(*tbl), tbl);
13377         int ret;
13378
13379         MLX5_ASSERT(matcher->matcher_object);
13380         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
13381         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13382         return ret;
13383 }
13384
13385 /**
13386  * Release encap_decap resource.
13387  *
13388  * @param list
13389  *   Pointer to the hash list.
13390  * @param entry
13391  *   Pointer to the existing resource entry object.
13392  */
13393 void
13394 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
13395                               struct mlx5_hlist_entry *entry)
13396 {
13397         struct mlx5_dev_ctx_shared *sh = list->ctx;
13398         struct mlx5_flow_dv_encap_decap_resource *res =
13399                 container_of(entry, typeof(*res), entry);
13400
13401         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13402         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13403 }
13404
13405 /**
13406  * Release an encap/decap resource.
13407  *
13408  * @param dev
13409  *   Pointer to Ethernet device.
13410  * @param encap_decap_idx
13411  *   Index of encap decap resource.
13412  *
13413  * @return
13414  *   1 while a reference on it exists, 0 when freed.
13415  */
13416 static int
13417 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13418                                      uint32_t encap_decap_idx)
13419 {
13420         struct mlx5_priv *priv = dev->data->dev_private;
13421         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
13422
13423         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13424                                         encap_decap_idx);
13425         if (!cache_resource)
13426                 return 0;
13427         MLX5_ASSERT(cache_resource->action);
13428         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
13429                                      &cache_resource->entry);
13430 }
13431
13432 /**
13433  * Release a jump-to-table action resource.
13434  *
13435  * @param dev
13436  *   Pointer to Ethernet device.
13437  * @param rix_jump
13438  *   Index to the jump action resource.
13439  *
13440  * @return
13441  *   1 while a reference on it exists, 0 when freed.
13442  */
13443 static int
13444 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13445                                   uint32_t rix_jump)
13446 {
13447         struct mlx5_priv *priv = dev->data->dev_private;
13448         struct mlx5_flow_tbl_data_entry *tbl_data;
13449
13450         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13451                                   rix_jump);
13452         if (!tbl_data)
13453                 return 0;
13454         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13455 }
13456
13457 void
13458 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
13459                          struct mlx5_hlist_entry *entry)
13460 {
13461         struct mlx5_flow_dv_modify_hdr_resource *res =
13462                 container_of(entry, typeof(*res), entry);
13463
13464         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13465         mlx5_free(entry);
13466 }
13467
13468 /**
13469  * Release a modify-header resource.
13470  *
13471  * @param dev
13472  *   Pointer to Ethernet device.
13473  * @param handle
13474  *   Pointer to mlx5_flow_handle.
13475  *
13476  * @return
13477  *   1 while a reference on it exists, 0 when freed.
13478  */
13479 static int
13480 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13481                                     struct mlx5_flow_handle *handle)
13482 {
13483         struct mlx5_priv *priv = dev->data->dev_private;
13484         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13485
13486         MLX5_ASSERT(entry->action);
13487         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13488 }
13489
13490 void
13491 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
13492                           struct mlx5_cache_entry *entry)
13493 {
13494         struct mlx5_dev_ctx_shared *sh = list->ctx;
13495         struct mlx5_flow_dv_port_id_action_resource *cache =
13496                         container_of(entry, typeof(*cache), entry);
13497
13498         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13499         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
13500 }
13501
13502 /**
13503  * Release port ID action resource.
13504  *
13505  * @param dev
13506  *   Pointer to Ethernet device.
13507  * @param handle
13508  *   Pointer to mlx5_flow_handle.
13509  *
13510  * @return
13511  *   1 while a reference on it exists, 0 when freed.
13512  */
13513 static int
13514 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13515                                         uint32_t port_id)
13516 {
13517         struct mlx5_priv *priv = dev->data->dev_private;
13518         struct mlx5_flow_dv_port_id_action_resource *cache;
13519
13520         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13521         if (!cache)
13522                 return 0;
13523         MLX5_ASSERT(cache->action);
13524         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
13525                                      &cache->entry);
13526 }
13527
13528 /**
13529  * Release shared RSS action resource.
13530  *
13531  * @param dev
13532  *   Pointer to Ethernet device.
13533  * @param srss
13534  *   Shared RSS action index.
13535  */
13536 static void
13537 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13538 {
13539         struct mlx5_priv *priv = dev->data->dev_private;
13540         struct mlx5_shared_action_rss *shared_rss;
13541
13542         shared_rss = mlx5_ipool_get
13543                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13544         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13545 }
13546
13547 void
13548 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
13549                             struct mlx5_cache_entry *entry)
13550 {
13551         struct mlx5_dev_ctx_shared *sh = list->ctx;
13552         struct mlx5_flow_dv_push_vlan_action_resource *cache =
13553                         container_of(entry, typeof(*cache), entry);
13554
13555         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13556         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
13557 }
13558
13559 /**
13560  * Release the push VLAN action resource.
13561  *
13562  * @param dev
13563  *   Pointer to Ethernet device.
13564  * @param handle
13565  *   Pointer to mlx5_flow_handle.
13566  *
13567  * @return
13568  *   1 while a reference on it exists, 0 when freed.
13569  */
13570 static int
13571 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13572                                           struct mlx5_flow_handle *handle)
13573 {
13574         struct mlx5_priv *priv = dev->data->dev_private;
13575         struct mlx5_flow_dv_push_vlan_action_resource *cache;
13576         uint32_t idx = handle->dvh.rix_push_vlan;
13577
13578         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13579         if (!cache)
13580                 return 0;
13581         MLX5_ASSERT(cache->action);
13582         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
13583                                      &cache->entry);
13584 }
13585
13586 /**
13587  * Release the fate resource.
13588  *
13589  * @param dev
13590  *   Pointer to Ethernet device.
13591  * @param handle
13592  *   Pointer to mlx5_flow_handle.
13593  */
13594 static void
13595 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
13596                                struct mlx5_flow_handle *handle)
13597 {
13598         if (!handle->rix_fate)
13599                 return;
13600         switch (handle->fate_action) {
13601         case MLX5_FLOW_FATE_QUEUE:
13602                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
13603                         mlx5_hrxq_release(dev, handle->rix_hrxq);
13604                 break;
13605         case MLX5_FLOW_FATE_JUMP:
13606                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
13607                 break;
13608         case MLX5_FLOW_FATE_PORT_ID:
13609                 flow_dv_port_id_action_resource_release(dev,
13610                                 handle->rix_port_id_action);
13611                 break;
13612         default:
13613                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
13614                 break;
13615         }
13616         handle->rix_fate = 0;
13617 }
13618
13619 void
13620 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
13621                          struct mlx5_cache_entry *entry)
13622 {
13623         struct mlx5_flow_dv_sample_resource *cache_resource =
13624                         container_of(entry, typeof(*cache_resource), entry);
13625         struct rte_eth_dev *dev = cache_resource->dev;
13626         struct mlx5_priv *priv = dev->data->dev_private;
13627
13628         if (cache_resource->verbs_action)
13629                 claim_zero(mlx5_flow_os_destroy_flow_action
13630                                 (cache_resource->verbs_action));
13631         if (cache_resource->normal_path_tbl)
13632                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13633                         cache_resource->normal_path_tbl);
13634         flow_dv_sample_sub_actions_release(dev,
13635                                 &cache_resource->sample_idx);
13636         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13637                         cache_resource->idx);
13638         DRV_LOG(DEBUG, "sample resource %p: removed",
13639                 (void *)cache_resource);
13640 }
13641
13642 /**
13643  * Release a sample resource.
13644  *
13645  * @param dev
13646  *   Pointer to Ethernet device.
13647  * @param handle
13648  *   Pointer to mlx5_flow_handle.
13649  *
13650  * @return
13651  *   1 while a reference on it exists, 0 when freed.
13652  */
13653 static int
13654 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
13655                                      struct mlx5_flow_handle *handle)
13656 {
13657         struct mlx5_priv *priv = dev->data->dev_private;
13658         struct mlx5_flow_dv_sample_resource *cache_resource;
13659
13660         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13661                          handle->dvh.rix_sample);
13662         if (!cache_resource)
13663                 return 0;
13664         MLX5_ASSERT(cache_resource->verbs_action);
13665         return mlx5_cache_unregister(&priv->sh->sample_action_list,
13666                                      &cache_resource->entry);
13667 }
13668
13669 void
13670 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
13671                              struct mlx5_cache_entry *entry)
13672 {
13673         struct mlx5_flow_dv_dest_array_resource *cache_resource =
13674                         container_of(entry, typeof(*cache_resource), entry);
13675         struct rte_eth_dev *dev = cache_resource->dev;
13676         struct mlx5_priv *priv = dev->data->dev_private;
13677         uint32_t i = 0;
13678
13679         MLX5_ASSERT(cache_resource->action);
13680         if (cache_resource->action)
13681                 claim_zero(mlx5_flow_os_destroy_flow_action
13682                                         (cache_resource->action));
13683         for (; i < cache_resource->num_of_dest; i++)
13684                 flow_dv_sample_sub_actions_release(dev,
13685                                 &cache_resource->sample_idx[i]);
13686         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13687                         cache_resource->idx);
13688         DRV_LOG(DEBUG, "destination array resource %p: removed",
13689                 (void *)cache_resource);
13690 }
13691
13692 /**
13693  * Release a destination array resource.
13694  *
13695  * @param dev
13696  *   Pointer to Ethernet device.
13697  * @param handle
13698  *   Pointer to mlx5_flow_handle.
13699  *
13700  * @return
13701  *   1 while a reference on it exists, 0 when freed.
13702  */
13703 static int
13704 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
13705                                     struct mlx5_flow_handle *handle)
13706 {
13707         struct mlx5_priv *priv = dev->data->dev_private;
13708         struct mlx5_flow_dv_dest_array_resource *cache;
13709
13710         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13711                                handle->dvh.rix_dest_array);
13712         if (!cache)
13713                 return 0;
13714         MLX5_ASSERT(cache->action);
13715         return mlx5_cache_unregister(&priv->sh->dest_array_list,
13716                                      &cache->entry);
13717 }
13718
13719 static void
13720 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
13721 {
13722         struct mlx5_priv *priv = dev->data->dev_private;
13723         struct mlx5_dev_ctx_shared *sh = priv->sh;
13724         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
13725                                 sh->geneve_tlv_option_resource;
13726         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
13727         if (geneve_opt_resource) {
13728                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
13729                                          __ATOMIC_RELAXED))) {
13730                         claim_zero(mlx5_devx_cmd_destroy
13731                                         (geneve_opt_resource->obj));
13732                         mlx5_free(sh->geneve_tlv_option_resource);
13733                         sh->geneve_tlv_option_resource = NULL;
13734                 }
13735         }
13736         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
13737 }
13738
13739 /**
13740  * Remove the flow from the NIC but keep it in memory.
13741  * Lock free (the mutex should be acquired by the caller).
13742  *
13743  * @param[in] dev
13744  *   Pointer to Ethernet device.
13745  * @param[in, out] flow
13746  *   Pointer to flow structure.
13747  */
13748 static void
13749 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
13750 {
13751         struct mlx5_flow_handle *dh;
13752         uint32_t handle_idx;
13753         struct mlx5_priv *priv = dev->data->dev_private;
13754
13755         if (!flow)
13756                 return;
13757         handle_idx = flow->dev_handles;
13758         while (handle_idx) {
13759                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13760                                     handle_idx);
13761                 if (!dh)
13762                         return;
13763                 if (dh->drv_flow) {
13764                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
13765                         dh->drv_flow = NULL;
13766                 }
13767                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
13768                         flow_dv_fate_resource_release(dev, dh);
13769                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13770                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13771                 handle_idx = dh->next.next;
13772         }
13773 }
13774
13775 /**
13776  * Remove the flow from both the NIC and memory.
13777  * Lock free (the mutex should be acquired by the caller).
13778  *
13779  * @param[in] dev
13780  *   Pointer to the Ethernet device structure.
13781  * @param[in, out] flow
13782  *   Pointer to flow structure.
13783  */
13784 static void
13785 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
13786 {
13787         struct mlx5_flow_handle *dev_handle;
13788         struct mlx5_priv *priv = dev->data->dev_private;
13789         struct mlx5_flow_meter_info *fm = NULL;
13790         uint32_t srss = 0;
13791
13792         if (!flow)
13793                 return;
13794         flow_dv_remove(dev, flow);
13795         if (flow->counter) {
13796                 flow_dv_counter_free(dev, flow->counter);
13797                 flow->counter = 0;
13798         }
13799         if (flow->meter) {
13800                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
13801                 if (fm)
13802                         mlx5_flow_meter_detach(priv, fm);
13803                 flow->meter = 0;
13804         }
13805         /* Keep the current age handling by default. */
13806         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
13807                 flow_dv_aso_ct_release(dev, flow->ct);
13808         else if (flow->age)
13809                 flow_dv_aso_age_release(dev, flow->age);
13810         if (flow->geneve_tlv_option) {
13811                 flow_dv_geneve_tlv_option_resource_release(dev);
13812                 flow->geneve_tlv_option = 0;
13813         }
13814         while (flow->dev_handles) {
13815                 uint32_t tmp_idx = flow->dev_handles;
13816
13817                 dev_handle = mlx5_ipool_get(priv->sh->ipool
13818                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
13819                 if (!dev_handle)
13820                         return;
13821                 flow->dev_handles = dev_handle->next.next;
13822                 if (dev_handle->dvh.matcher)
13823                         flow_dv_matcher_release(dev, dev_handle);
13824                 if (dev_handle->dvh.rix_sample)
13825                         flow_dv_sample_resource_release(dev, dev_handle);
13826                 if (dev_handle->dvh.rix_dest_array)
13827                         flow_dv_dest_array_resource_release(dev, dev_handle);
13828                 if (dev_handle->dvh.rix_encap_decap)
13829                         flow_dv_encap_decap_resource_release(dev,
13830                                 dev_handle->dvh.rix_encap_decap);
13831                 if (dev_handle->dvh.modify_hdr)
13832                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
13833                 if (dev_handle->dvh.rix_push_vlan)
13834                         flow_dv_push_vlan_action_resource_release(dev,
13835                                                                   dev_handle);
13836                 if (dev_handle->dvh.rix_tag)
13837                         flow_dv_tag_release(dev,
13838                                             dev_handle->dvh.rix_tag);
13839                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
13840                         flow_dv_fate_resource_release(dev, dev_handle);
13841                 else if (!srss)
13842                         srss = dev_handle->rix_srss;
13843                 if (fm && dev_handle->is_meter_flow_id &&
13844                     dev_handle->split_flow_id)
13845                         mlx5_ipool_free(fm->flow_ipool,
13846                                         dev_handle->split_flow_id);
13847                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13848                            tmp_idx);
13849         }
13850         if (srss)
13851                 flow_dv_shared_rss_action_release(dev, srss);
13852 }
13853
13854 /**
13855  * Release array of hash RX queue objects.
13856  * Helper function.
13857  *
13858  * @param[in] dev
13859  *   Pointer to the Ethernet device structure.
13860  * @param[in, out] hrxqs
13861  *   Array of hash RX queue objects.
13862  *
13863  * @return
13864  *   Total number of references to hash RX queue objects in *hrxqs* array
13865  *   after this operation.
13866  */
13867 static int
13868 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
13869                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
13870 {
13871         size_t i;
13872         int remaining = 0;
13873
13874         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
13875                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
13876
13877                 if (!ret)
13878                         (*hrxqs)[i] = 0;
13879                 remaining += ret;
13880         }
13881         return remaining;
13882 }
13883
13884 /**
13885  * Release all hash RX queue objects representing shared RSS action.
13886  *
13887  * @param[in] dev
13888  *   Pointer to the Ethernet device structure.
13889  * @param[in, out] shared_rss
13890  *   Shared RSS action to remove hash RX queue objects from.
13891  *
13892  * @return
13893  *   Total number of references to hash RX queue objects stored in
13894  *   *shared_rss* after this operation.
13895  *   Expected to be 0 if no external references are held.
13896  */
13897 static int
13898 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
13899                                  struct mlx5_shared_action_rss *shared_rss)
13900 {
13901         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
13902 }
13903
13904 /**
13905  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
13906  * user input.
13907  *
13908  * Only one hash value is available for one L3+L4 combination.
13909  * For example,
13910  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
13911  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
13912  * the same slot in mlx5_rss_hash_fields.
13913  *
13914  * @param[in] rss
13915  *   Pointer to the shared action RSS conf.
13916  * @param[in, out] hash_field
13917  *   hash_field variable needed to be adjusted.
13918  *   Hash field variable to be adjusted.
13919  * @return
13920  *   void
13921  */
13922 static void
13923 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
13924                                      uint64_t *hash_field)
13925 {
13926         uint64_t rss_types = rss->origin.types;
13927
13928         switch (*hash_field & ~IBV_RX_HASH_INNER) {
13929         case MLX5_RSS_HASH_IPV4:
13930                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
13931                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
13932                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13933                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
13934                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13935                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
13936                         else
13937                                 *hash_field |= MLX5_RSS_HASH_IPV4;
13938                 }
13939                 return;
13940         case MLX5_RSS_HASH_IPV6:
13941                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13942                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13943                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13944                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13945                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13946                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
13947                         else
13948                                 *hash_field |= MLX5_RSS_HASH_IPV6;
13949                 }
13950                 return;
13951         case MLX5_RSS_HASH_IPV4_UDP:
13952                 /* fall-through. */
13953         case MLX5_RSS_HASH_IPV6_UDP:
13954                 if (rss_types & ETH_RSS_UDP) {
13955                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
13956                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13957                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
13958                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13959                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
13960                         else
13961                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
13962                 }
13963                 return;
13964         case MLX5_RSS_HASH_IPV4_TCP:
13965                 /* fall-through. */
13966         case MLX5_RSS_HASH_IPV6_TCP:
13967                 if (rss_types & ETH_RSS_TCP) {
13968                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
13969                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13970                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
13971                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13972                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
13973                         else
13974                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
13975                 }
13976                 return;
13977         default:
13978                 return;
13979         }
13980 }
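
/*
 * Worked example (sketch only, not built with the driver): with user RSS
 * types ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY, the helper above narrows the
 * IPv4 slot from the combined SRC+DST hash to a source-only hash.
 */
static void
example_l3_hash_adjust(struct mlx5_shared_action_rss *shared_rss)
{
        uint64_t hash_field = MLX5_RSS_HASH_IPV4;

        shared_rss->origin.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY;
        __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_field);
        MLX5_ASSERT(hash_field == IBV_RX_HASH_SRC_IPV4);
}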
13981
13982 /**
13983  * Set up the shared RSS action.
13984  * Prepare a set of hash RX queue objects sufficient to handle all valid
13985  * hash_fields combinations (see enum ibv_rx_hash_fields).
13986  *
13987  * @param[in] dev
13988  *   Pointer to the Ethernet device structure.
13989  * @param[in] action_idx
13990  *   Shared RSS action ipool index.
13991  * @param[in, out] shared_rss
13992  *   Partially initialized shared RSS action.
13993  * @param[out] error
13994  *   Perform verbose error reporting if not NULL. Initialized in case of
13995  *   error only.
13996  *
13997  * @return
13998  *   0 on success, otherwise negative errno value.
13999  */
14000 static int
14001 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14002                            uint32_t action_idx,
14003                            struct mlx5_shared_action_rss *shared_rss,
14004                            struct rte_flow_error *error)
14005 {
14006         struct mlx5_flow_rss_desc rss_desc = { 0 };
14007         size_t i;
14008         int err;
14009
14010         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14011                 return rte_flow_error_set(error, rte_errno,
14012                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14013                                           "cannot setup indirection table");
14014         }
14015         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14016         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14017         rss_desc.const_q = shared_rss->origin.queue;
14018         rss_desc.queue_num = shared_rss->origin.queue_num;
14019         /* Set non-zero value to indicate a shared RSS. */
14020         rss_desc.shared_rss = action_idx;
14021         rss_desc.ind_tbl = shared_rss->ind_tbl;
14022         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14023                 uint32_t hrxq_idx;
14024                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14025                 int tunnel = 0;
14026
14027                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14028                 if (shared_rss->origin.level > 1) {
14029                         hash_fields |= IBV_RX_HASH_INNER;
14030                         tunnel = 1;
14031                 }
14032                 rss_desc.tunnel = tunnel;
14033                 rss_desc.hash_fields = hash_fields;
14034                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14035                 if (!hrxq_idx) {
14036                         rte_flow_error_set
14037                                 (error, rte_errno,
14038                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14039                                  "cannot get hash queue");
14040                         goto error_hrxq_new;
14041                 }
14042                 err = __flow_dv_action_rss_hrxq_set
14043                         (shared_rss, hash_fields, hrxq_idx);
14044                 MLX5_ASSERT(!err);
14045         }
14046         return 0;
14047 error_hrxq_new:
14048         err = rte_errno;
14049         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14050         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14051                 shared_rss->ind_tbl = NULL;
14052         rte_errno = err;
14053         return -rte_errno;
14054 }
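
/*
 * Application-side sketch (hypothetical, public API only) of creating the
 * indirect RSS action that the setup/create helpers here serve. The
 * queues array and the Toeplitz/ETH_RSS_IP choices are placeholders.
 */
static struct rte_flow_action_handle *
example_indirect_rss(uint16_t port_id, const uint16_t *queues,
                     uint32_t queues_n, struct rte_flow_error *err)
{
        struct rte_flow_action_rss rss = {
                .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
                .level = 0,
                .types = ETH_RSS_IP,
                .queue = queues,
                .queue_num = queues_n,
        };
        struct rte_flow_indir_action_conf conf = { .ingress = 1 };
        struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };

        return rte_flow_action_handle_create(port_id, &conf, &action, err);
}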
14055
14056 /**
14057  * Create shared RSS action.
14058  *
14059  * @param[in] dev
14060  *   Pointer to the Ethernet device structure.
14061  * @param[in] conf
14062  *   Shared action configuration.
14063  * @param[in] rss
14064  *   RSS action specification used to create shared action.
14065  * @param[out] error
14066  *   Perform verbose error reporting if not NULL. Initialized in case of
14067  *   error only.
14068  *
14069  * @return
14070  *   A valid shared action ID in case of success, 0 otherwise and
14071  *   rte_errno is set.
14072  */
14073 static uint32_t
14074 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14075                             const struct rte_flow_indir_action_conf *conf,
14076                             const struct rte_flow_action_rss *rss,
14077                             struct rte_flow_error *error)
14078 {
14079         struct mlx5_priv *priv = dev->data->dev_private;
14080         struct mlx5_shared_action_rss *shared_rss = NULL;
14081         void *queue = NULL;
14082         struct rte_flow_action_rss *origin;
14083         const uint8_t *rss_key;
14084         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14085         uint32_t idx;
14086
14087         RTE_SET_USED(conf);
14088         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14089                             0, SOCKET_ID_ANY);
14090         shared_rss = mlx5_ipool_zmalloc
14091                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14092         if (!shared_rss || !queue) {
14093                 rte_flow_error_set(error, ENOMEM,
14094                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14095                                    "cannot allocate resource memory");
14096                 goto error_rss_init;
14097         }
14098         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14099                 rte_flow_error_set(error, E2BIG,
14100                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14101                                    "rss action number out of range");
14102                 goto error_rss_init;
14103         }
14104         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14105                                           sizeof(*shared_rss->ind_tbl),
14106                                           0, SOCKET_ID_ANY);
14107         if (!shared_rss->ind_tbl) {
14108                 rte_flow_error_set(error, ENOMEM,
14109                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14110                                    "cannot allocate resource memory");
14111                 goto error_rss_init;
14112         }
14113         memcpy(queue, rss->queue, queue_size);
14114         shared_rss->ind_tbl->queues = queue;
14115         shared_rss->ind_tbl->queues_n = rss->queue_num;
14116         origin = &shared_rss->origin;
14117         origin->func = rss->func;
14118         origin->level = rss->level;
14119         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14120         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14121         /* NULL RSS key indicates default RSS key. */
14122         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14123         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14124         origin->key = &shared_rss->key[0];
14125         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14126         origin->queue = queue;
14127         origin->queue_num = rss->queue_num;
14128         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14129                 goto error_rss_init;
14130         rte_spinlock_init(&shared_rss->action_rss_sl);
14131         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14132         rte_spinlock_lock(&priv->shared_act_sl);
14133         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14134                      &priv->rss_shared_actions, idx, shared_rss, next);
14135         rte_spinlock_unlock(&priv->shared_act_sl);
14136         return idx;
14137 error_rss_init:
14138         if (shared_rss) {
14139                 if (shared_rss->ind_tbl)
14140                         mlx5_free(shared_rss->ind_tbl);
14141                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14142                                 idx);
14143         }
14144         if (queue)
14145                 mlx5_free(queue);
14146         return 0;
14147 }
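
/*
 * Application-side sketch of the path above through the public API;
 * port_id and the queue array are illustrative assumptions:
 *
 *   uint16_t queues[] = { 0, 1, 2, 3 };
 *   struct rte_flow_action_rss rss_conf = {
 *           .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *           .level = 0,
 *           .types = ETH_RSS_IP,
 *           .key = NULL,
 *           .key_len = 0,
 *           .queue = queues,
 *           .queue_num = RTE_DIM(queues),
 *   };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_RSS,
 *           .conf = &rss_conf,
 *   };
 *   struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *   struct rte_flow_error flow_err;
 *   struct rte_flow_action_handle *handle =
 *           rte_flow_action_handle_create(port_id, &conf, &action,
 *                                         &flow_err);
 *
 * A NULL key selects rss_hash_default_key and zero types fall back
 * to ETH_RSS_IP, exactly as handled above.
 */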
14148
14149 /**
14150  * Destroy the shared RSS action.
14151  * Release related hash RX queue objects.
14152  *
14153  * @param[in] dev
14154  *   Pointer to the Ethernet device structure.
14155  * @param[in] idx
14156  *   The shared RSS action object ID to be removed.
14157  * @param[out] error
14158  *   Perform verbose error reporting if not NULL. Initialized in case of
14159  *   error only.
14160  *
14161  * @return
14162  *   0 on success, otherwise negative errno value.
14163  */
14164 static int
14165 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14166                              struct rte_flow_error *error)
14167 {
14168         struct mlx5_priv *priv = dev->data->dev_private;
14169         struct mlx5_shared_action_rss *shared_rss =
14170             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14171         uint32_t old_refcnt = 1;
14172         int remaining;
14173         uint16_t *queue = NULL;
14174
14175         if (!shared_rss)
14176                 return rte_flow_error_set(error, EINVAL,
14177                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14178                                           "invalid shared action");
14179         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14180         if (remaining)
14181                 return rte_flow_error_set(error, EBUSY,
14182                                           RTE_FLOW_ERROR_TYPE_ACTION,
14183                                           NULL,
14184                                           "shared rss hrxq has references");
14185         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14186                                          0, 0, __ATOMIC_ACQUIRE,
14187                                          __ATOMIC_RELAXED))
14188                 return rte_flow_error_set(error, EBUSY,
14189                                           RTE_FLOW_ERROR_TYPE_ACTION,
14190                                           NULL,
14191                                           "shared rss has references");
14192         queue = shared_rss->ind_tbl->queues;
14193         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14194         if (remaining)
14195                 return rte_flow_error_set(error, EBUSY,
14196                                           RTE_FLOW_ERROR_TYPE_ACTION,
14197                                           NULL,
14198                                           "shared rss indirection table has"
14199                                           " references");
14200         mlx5_free(queue);
14201         rte_spinlock_lock(&priv->shared_act_sl);
14202         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14203                      &priv->rss_shared_actions, idx, shared_rss, next);
14204         rte_spinlock_unlock(&priv->shared_act_sl);
14205         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14206                         idx);
14207         return 0;
14208 }
14209
14210 /**
14211  * Create indirect action, lock free,
14212  * (mutex should be acquired by caller).
14213  * Dispatcher for action type specific call.
14214  *
14215  * @param[in] dev
14216  *   Pointer to the Ethernet device structure.
14217  * @param[in] conf
14218  *   Shared action configuration.
14219  * @param[in] action
14220  *   Action specification used to create indirect action.
14221  * @param[out] error
14222  *   Perform verbose error reporting if not NULL. Initialized in case of
14223  *   error only.
14224  *
14225  * @return
14226  *   A valid shared action handle in case of success, NULL otherwise and
14227  *   rte_errno is set.
14228  */
14229 static struct rte_flow_action_handle *
14230 flow_dv_action_create(struct rte_eth_dev *dev,
14231                       const struct rte_flow_indir_action_conf *conf,
14232                       const struct rte_flow_action *action,
14233                       struct rte_flow_error *err)
14234 {
14235         struct mlx5_priv *priv = dev->data->dev_private;
14236         uint32_t age_idx = 0;
14237         uint32_t idx = 0;
14238         uint32_t ret = 0;
14239
14240         switch (action->type) {
14241         case RTE_FLOW_ACTION_TYPE_RSS:
14242                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14243                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14244                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14245                 break;
14246         case RTE_FLOW_ACTION_TYPE_AGE:
14247                 age_idx = flow_dv_aso_age_alloc(dev, err);
14248                 if (!age_idx) {
14249                         ret = -rte_errno;
14250                         break;
14251                 }
14252                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14253                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14254                 flow_dv_aso_age_params_init(dev, age_idx,
14255                                         ((const struct rte_flow_action_age *)
14256                                                 action->conf)->context ?
14257                                         ((const struct rte_flow_action_age *)
14258                                                 action->conf)->context :
14259                                         (void *)(uintptr_t)idx,
14260                                         ((const struct rte_flow_action_age *)
14261                                                 action->conf)->timeout);
14262                 ret = age_idx;
14263                 break;
14264         case RTE_FLOW_ACTION_TYPE_COUNT:
14265                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14266                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14267                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14268                 break;
14269         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14270                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14271                                                          err);
14272                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14273                 break;
14274         default:
14275                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14276                                    NULL, "action type not supported");
14277                 break;
14278         }
14279         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14280 }
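
/*
 * The returned handle is not a pointer but an encoded index: the
 * action type sits above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the
 * ipool index below it. Sketch of the decoding the sibling
 * dispatchers perform:
 *
 *   uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *   uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *   uint32_t idx = act_idx &
 *                  ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 *
 * CT handles additionally embed the owner port id, see
 * MLX5_INDIRECT_ACT_CT_GEN_IDX() above.
 */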
14281
14282 /**
14283  * Destroy the indirect action.
14284  * Release action related resources on the NIC and the memory.
14285  * Lock free, (mutex should be acquired by caller).
14286  * Dispatcher for action type specific call.
14287  *
14288  * @param[in] dev
14289  *   Pointer to the Ethernet device structure.
14290  * @param[in] handle
14291  *   The indirect action object handle to be removed.
14292  * @param[out] error
14293  *   Perform verbose error reporting if not NULL. Initialized in case of
14294  *   error only.
14295  *
14296  * @return
14297  *   0 on success, otherwise negative errno value.
14298  */
14299 static int
14300 flow_dv_action_destroy(struct rte_eth_dev *dev,
14301                        struct rte_flow_action_handle *handle,
14302                        struct rte_flow_error *error)
14303 {
14304         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14305         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14306         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14307         struct mlx5_flow_counter *cnt;
14308         uint32_t no_flow_refcnt = 1;
14309         int ret;
14310
14311         switch (type) {
14312         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14313                 return __flow_dv_action_rss_release(dev, idx, error);
14314         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14315                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14316                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14317                                                  &no_flow_refcnt, 1, false,
14318                                                  __ATOMIC_ACQUIRE,
14319                                                  __ATOMIC_RELAXED))
14320                         return rte_flow_error_set(error, EBUSY,
14321                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14322                                                   NULL,
14323                                                   "Indirect count action has references");
14324                 flow_dv_counter_free(dev, idx);
14325                 return 0;
14326         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14327                 ret = flow_dv_aso_age_release(dev, idx);
14328                 if (ret)
14329                         /*
14330                          * In this case, the last flow holding a reference
14331                          * will actually release the age action.
14332                          */
14333                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14334                                 " released with references %d.", idx, ret);
14335                 return 0;
14336         case MLX5_INDIRECT_ACTION_TYPE_CT:
14337                 ret = flow_dv_aso_ct_release(dev, idx);
14338                 if (ret < 0)
14339                         return ret;
14340                 if (ret > 0)
14341                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14342                                 "has references %d.", idx, ret);
14343                 return 0;
14344         default:
14345                 return rte_flow_error_set(error, ENOTSUP,
14346                                           RTE_FLOW_ERROR_TYPE_ACTION,
14347                                           NULL,
14348                                           "action type not supported");
14349         }
14350 }
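
/*
 * Tear-down sketch from the application side: a handle still
 * referenced by flow rules is refused with EBUSY by the dispatchers
 * above. drop_referencing_flows() is a hypothetical helper:
 *
 *   int rc = rte_flow_action_handle_destroy(port_id, handle,
 *                                           &flow_err);
 *   if (rc != 0 && rte_errno == EBUSY) {
 *           drop_referencing_flows(port_id);
 *           rc = rte_flow_action_handle_destroy(port_id, handle,
 *                                               &flow_err);
 *   }
 */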
14351
14352 /**
14353  * Update the shared RSS action configuration in place.
14354  *
14355  * @param[in] dev
14356  *   Pointer to the Ethernet device structure.
14357  * @param[in] idx
14358  *   The shared RSS action object ID to be updated.
14359  * @param[in] action_conf
14360  *   RSS action specification used to modify *shared_rss*.
14361  * @param[out] error
14362  *   Perform verbose error reporting if not NULL. Initialized in case of
14363  *   error only.
14364  *
14365  * @return
14366  *   0 on success, otherwise negative errno value.
14367  * @note Currently only update of the RSS queue set is supported.
14368  */
14369 static int
14370 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14371                             const struct rte_flow_action_rss *action_conf,
14372                             struct rte_flow_error *error)
14373 {
14374         struct mlx5_priv *priv = dev->data->dev_private;
14375         struct mlx5_shared_action_rss *shared_rss =
14376             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14377         int ret = 0;
14378         void *queue = NULL;
14379         uint16_t *queue_old = NULL;
14380         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
14381
14382         if (!shared_rss)
14383                 return rte_flow_error_set(error, EINVAL,
14384                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14385                                           "invalid shared action to update");
14386         if (priv->obj_ops.ind_table_modify == NULL)
14387                 return rte_flow_error_set(error, ENOTSUP,
14388                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14389                                           "cannot modify indirection table");
14390         queue = mlx5_malloc(MLX5_MEM_ZERO,
14391                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14392                             0, SOCKET_ID_ANY);
14393         if (!queue)
14394                 return rte_flow_error_set(error, ENOMEM,
14395                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14396                                           NULL,
14397                                           "cannot allocate resource memory");
14398         memcpy(queue, action_conf->queue, queue_size);
14399         MLX5_ASSERT(shared_rss->ind_tbl);
14400         rte_spinlock_lock(&shared_rss->action_rss_sl);
14401         queue_old = shared_rss->ind_tbl->queues;
14402         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
14403                                         queue, action_conf->queue_num, true);
14404         if (ret) {
14405                 mlx5_free(queue);
14406                 ret = rte_flow_error_set(error, rte_errno,
14407                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14408                                           "cannot update indirection table");
14409         } else {
14410                 mlx5_free(queue_old);
14411                 shared_rss->origin.queue = queue;
14412                 shared_rss->origin.queue_num = action_conf->queue_num;
14413         }
14414         rte_spinlock_unlock(&shared_rss->action_rss_sl);
14415         return ret;
14416 }
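
/*
 * Queue-set update sketch through the public API; the update payload
 * for an RSS indirect action is a struct rte_flow_action, and only
 * the queue set is honored here (see the note above). The new queue
 * ids are illustrative:
 *
 *   uint16_t new_queues[] = { 4, 5 };
 *   struct rte_flow_action_rss new_conf = {
 *           .queue = new_queues,
 *           .queue_num = RTE_DIM(new_queues),
 *   };
 *   struct rte_flow_action update = {
 *           .type = RTE_FLOW_ACTION_TYPE_RSS,
 *           .conf = &new_conf,
 *   };
 *   rte_flow_action_handle_update(port_id, handle, &update,
 *                                 &flow_err);
 */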
14417
14418 /**
14419  * Update the conntrack context or direction in place.
14420  * Context update should be synchronized.
14421  *
14422  * @param[in] dev
14423  *   Pointer to the Ethernet device structure.
14424  * @param[in] idx
14425  *   The conntrack object ID to be updated.
14426  * @param[in] update
14427  *   Pointer to the structure of information to update.
14428  * @param[out] error
14429  *   Perform verbose error reporting if not NULL. Initialized in case of
14430  *   error only.
14431  *
14432  * @return
14433  *   0 on success, otherwise negative errno value.
14434  */
14435 static int
14436 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14437                            const struct rte_flow_modify_conntrack *update,
14438                            struct rte_flow_error *error)
14439 {
14440         struct mlx5_priv *priv = dev->data->dev_private;
14441         struct mlx5_aso_ct_action *ct;
14442         const struct rte_flow_action_conntrack *new_prf;
14443         int ret = 0;
14444         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14445         uint32_t dev_idx;
14446
14447         if (PORT_ID(priv) != owner)
14448                 return rte_flow_error_set(error, EACCES,
14449                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14450                                           NULL,
14451                                           "CT object owned by another port");
14452         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14453         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14454         if (!ct->refcnt)
14455                 return rte_flow_error_set(error, ENOMEM,
14456                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14457                                           NULL,
14458                                           "CT object is inactive");
14459         new_prf = &update->new_ct;
14460         if (update->direction)
14461                 ct->is_original = !!new_prf->is_original_dir;
14462         if (update->state) {
14463                 /* Only validate the profile when it needs to be updated. */
14464                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14465                 if (ret)
14466                         return ret;
14467                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14468                 if (ret)
14469                         return rte_flow_error_set(error, EIO,
14470                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14471                                         NULL,
14472                                         "Failed to send CT context update WQE");
14473                 /* Block until ready or a failure. */
14474                 ret = mlx5_aso_ct_available(priv->sh, ct);
14475                 if (ret)
14476                         rte_flow_error_set(error, rte_errno,
14477                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14478                                            NULL,
14479                                            "Timeout to get the CT update");
14480         }
14481         return ret;
14482 }
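
/*
 * Conntrack update sketch; the handle must have been created on the
 * same port (cross-port updates fail with EACCES above) and the
 * profile values are placeholders:
 *
 *   struct rte_flow_modify_conntrack mod = {
 *           .new_ct = {
 *                   .state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
 *                   .is_original_dir = 1,
 *           },
 *           .direction = 1,
 *           .state = 1,
 *   };
 *   rte_flow_action_handle_update(port_id, ct_handle, &mod,
 *                                 &flow_err);
 *
 * Setting .direction only flips ct->is_original locally; setting
 * .state validates the profile, posts an ASO WQE and blocks until
 * the new context is usable.
 */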
14483
14484 /**
14485  * Update the shared action configuration in place, lock free,
14486  * (mutex should be acquired by caller).
14487  *
14488  * @param[in] dev
14489  *   Pointer to the Ethernet device structure.
14490  * @param[in] handle
14491  *   The indirect action object handle to be updated.
14492  * @param[in] update
14493  *   Action specification used to modify the action pointed to by *handle*.
14494  *   *update* can be of the same type as the action pointed to by the
14495  *   *handle* argument, or some other structure, e.g. a wrapper, depending on
14496  *   the indirect action type.
14497  * @param[out] error
14498  *   Perform verbose error reporting if not NULL. Initialized in case of
14499  *   error only.
14500  *
14501  * @return
14502  *   0 on success, otherwise negative errno value.
14503  */
14504 static int
14505 flow_dv_action_update(struct rte_eth_dev *dev,
14506                         struct rte_flow_action_handle *handle,
14507                         const void *update,
14508                         struct rte_flow_error *err)
14509 {
14510         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14511         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14512         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14513         const void *action_conf;
14514
14515         switch (type) {
14516         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14517                 action_conf = ((const struct rte_flow_action *)update)->conf;
14518                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14519         case MLX5_INDIRECT_ACTION_TYPE_CT:
14520                 return __flow_dv_action_ct_update(dev, idx, update, err);
14521         default:
14522                 return rte_flow_error_set(err, ENOTSUP,
14523                                           RTE_FLOW_ERROR_TYPE_ACTION,
14524                                           NULL,
14525                                           "action type update not supported");
14526         }
14527 }
14528
14529 /**
14530  * Destroy the meter sub policy table rules.
14531  * Lock free, (mutex should be acquired by caller).
14532  *
14533  * @param[in] dev
14534  *   Pointer to Ethernet device.
14535  * @param[in] sub_policy
14536  *   Pointer to meter sub policy table.
14537  */
14538 static void
14539 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
14540                              struct mlx5_flow_meter_sub_policy *sub_policy)
14541 {
14542         struct mlx5_flow_tbl_data_entry *tbl;
14543         int i;
14544
14545         for (i = 0; i < RTE_COLORS; i++) {
14546                 if (sub_policy->color_rule[i]) {
14547                         claim_zero(mlx5_flow_os_destroy_flow
14548                                 (sub_policy->color_rule[i]));
14549                         sub_policy->color_rule[i] = NULL;
14550                 }
14551                 if (sub_policy->color_matcher[i]) {
14552                         tbl = container_of(sub_policy->color_matcher[i]->tbl,
14553                                 typeof(*tbl), tbl);
14554                         mlx5_cache_unregister(&tbl->matchers,
14555                                       &sub_policy->color_matcher[i]->entry);
14556                         sub_policy->color_matcher[i] = NULL;
14557                 }
14558         }
14559         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14560                 if (sub_policy->rix_hrxq[i]) {
14561                         mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
14562                         sub_policy->rix_hrxq[i] = 0;
14563                 }
14564                 if (sub_policy->jump_tbl[i]) {
14565                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14566                         sub_policy->jump_tbl[i]);
14567                         sub_policy->jump_tbl[i] = NULL;
14568                 }
14569         }
14570         if (sub_policy->tbl_rsc) {
14571                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14572                         sub_policy->tbl_rsc);
14573                 sub_policy->tbl_rsc = NULL;
14574         }
14575 }
14576
14577 /**
14578  * Destroy policy rules, lock free,
14579  * (mutex should be acquired by caller).
14580  * Walks all sub-policies attached to the meter policy.
14581  *
14582  * @param[in] dev
14583  *   Pointer to the Ethernet device structure.
14584  * @param[in] mtr_policy
14585  *   Meter policy struct.
14586  */
14587 static void
14588 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
14589                       struct mlx5_flow_meter_policy *mtr_policy)
14590 {
14591         uint32_t i, j;
14592         struct mlx5_flow_meter_sub_policy *sub_policy;
14593         uint16_t sub_policy_num;
14594
14595         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14596                 sub_policy_num = (mtr_policy->sub_policy_num >>
14597                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14598                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14599                 for (j = 0; j < sub_policy_num; j++) {
14600                         sub_policy = mtr_policy->sub_policys[i][j];
14601                         if (sub_policy)
14602                                 __flow_dv_destroy_sub_policy_rules
14603                                                 (dev, sub_policy);
14604                 }
14605         }
14606 }
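
/*
 * Layout assumed by the shift/mask above: sub_policy_num packs one
 * small per-domain counter into a single word, so for domain i
 * (ingress, egress, transfer):
 *
 *   count_i = (mtr_policy->sub_policy_num >>
 *              (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
 *             MLX5_MTR_SUB_POLICY_NUM_MASK;
 *
 * i.e. each domain owns MLX5_MTR_SUB_POLICY_NUM_SHIFT bits of the
 * field.
 */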
14607
14608 /**
14609  * Destroy policy action, lock free,
14610  * (mutex should be acquired by caller).
14611  * Dispatcher for action type specific call.
14612  *
14613  * @param[in] dev
14614  *   Pointer to the Ethernet device structure.
14615  * @param[in] mtr_policy
14616  *   Meter policy struct.
14617  */
14618 static void
14619 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
14620                       struct mlx5_flow_meter_policy *mtr_policy)
14621 {
14622         struct rte_flow_action *rss_action;
14623         struct mlx5_flow_handle dev_handle;
14624         uint32_t i, j;
14625
14626         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14627                 if (mtr_policy->act_cnt[i].rix_mark) {
14628                         flow_dv_tag_release(dev,
14629                                 mtr_policy->act_cnt[i].rix_mark);
14630                         mtr_policy->act_cnt[i].rix_mark = 0;
14631                 }
14632                 if (mtr_policy->act_cnt[i].modify_hdr) {
14633                         dev_handle.dvh.modify_hdr =
14634                                 mtr_policy->act_cnt[i].modify_hdr;
14635                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
14636                 }
14637                 switch (mtr_policy->act_cnt[i].fate_action) {
14638                 case MLX5_FLOW_FATE_SHARED_RSS:
14639                         rss_action = mtr_policy->act_cnt[i].rss;
14640                         mlx5_free(rss_action);
14641                         break;
14642                 case MLX5_FLOW_FATE_PORT_ID:
14643                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
14644                                 flow_dv_port_id_action_resource_release(dev,
14645                                 mtr_policy->act_cnt[i].rix_port_id_action);
14646                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
14647                         }
14648                         break;
14649                 case MLX5_FLOW_FATE_DROP:
14650                 case MLX5_FLOW_FATE_JUMP:
14651                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14652                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
14653                                                 NULL;
14654                         break;
14655                 default:
14656                         /* Queue action: nothing to do. */
14657                         break;
14658                 }
14659         }
14660         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14661                 mtr_policy->dr_drop_action[j] = NULL;
14662 }
14663
14664 /**
14665  * Create policy action per domain, lock free,
14666  * (mutex should be acquired by caller).
14667  * Dispatcher for action type specific call.
14668  *
14669  * @param[in] dev
14670  *   Pointer to the Ethernet device structure.
14671  * @param[in] mtr_policy
14672  *   Meter policy struct.
14673  * @param[in] actions
14674  *   Per-color array of actions to create for the given meter domain.
14675  * @param[out] error
14676  *   Perform verbose error reporting if not NULL. Initialized in case of
14677  *   error only.
14678  *
14679  * @return
14680  *   0 on success, otherwise negative errno value.
14681  */
14682 static int
14683 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
14684                         struct mlx5_flow_meter_policy *mtr_policy,
14685                         const struct rte_flow_action *actions[RTE_COLORS],
14686                         enum mlx5_meter_domain domain,
14687                         struct rte_mtr_error *error)
14688 {
14689         struct mlx5_priv *priv = dev->data->dev_private;
14690         struct rte_flow_error flow_err;
14691         const struct rte_flow_action *act;
14692         uint64_t action_flags = 0;
14693         struct mlx5_flow_handle dh;
14694         struct mlx5_flow dev_flow;
14695         struct mlx5_flow_dv_port_id_action_resource port_id_action;
14696         int i, ret;
14697         uint8_t egress, transfer;
14698         struct mlx5_meter_policy_action_container *act_cnt = NULL;
14699         union {
14700                 struct mlx5_flow_dv_modify_hdr_resource res;
14701                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
14702                             sizeof(struct mlx5_modification_cmd) *
14703                             (MLX5_MAX_MODIFY_NUM + 1)];
14704         } mhdr_dummy;
14705
14706         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14707         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14708         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
14709         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
14710         memset(&port_id_action, 0,
14711                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
14712         dev_flow.handle = &dh;
14713         dev_flow.dv.port_id_action = &port_id_action;
14714         dev_flow.external = true;
14715         for (i = 0; i < RTE_COLORS; i++) {
14716                 if (i < MLX5_MTR_RTE_COLORS)
14717                         act_cnt = &mtr_policy->act_cnt[i];
14718                 for (act = actions[i];
14719                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
14720                         act++) {
14721                         switch (act->type) {
14722                         case RTE_FLOW_ACTION_TYPE_MARK:
14723                         {
14724                                 uint32_t tag_be = mlx5_flow_mark_set
14725                                         (((const struct rte_flow_action_mark *)
14726                                         (act->conf))->id);
14727
14728                                 if (i >= MLX5_MTR_RTE_COLORS)
14729                                         return -rte_mtr_error_set(error,
14730                                           ENOTSUP,
14731                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14732                                           NULL,
14733                                           "cannot create policy "
14734                                           "mark action for this color");
14735                                 dev_flow.handle->mark = 1;
14736                                 if (flow_dv_tag_resource_register(dev, tag_be,
14737                                                   &dev_flow, &flow_err))
14738                                         return -rte_mtr_error_set(error,
14739                                         ENOTSUP,
14740                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14741                                         NULL,
14742                                         "cannot setup policy mark action");
14743                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
14744                                 act_cnt->rix_mark =
14745                                         dev_flow.handle->dvh.rix_tag;
14746                                 action_flags |= MLX5_FLOW_ACTION_MARK;
14747                                 break;
14748                         }
14749                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
14750                         {
14751                                 struct mlx5_flow_dv_modify_hdr_resource
14752                                         *mhdr_res = &mhdr_dummy.res;
14753
14754                                 if (i >= MLX5_MTR_RTE_COLORS)
14755                                         return -rte_mtr_error_set(error,
14756                                           ENOTSUP,
14757                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14758                                           NULL,
14759                                           "cannot create policy "
14760                                           "set tag action for this color");
14761                                 memset(mhdr_res, 0, sizeof(*mhdr_res));
14762                                 mhdr_res->ft_type = transfer ?
14763                                         MLX5DV_FLOW_TABLE_TYPE_FDB :
14764                                         egress ?
14765                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
14766                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
14767                                 if (flow_dv_convert_action_set_tag
14768                                 (dev, mhdr_res,
14769                                 (const struct rte_flow_action_set_tag *)
14770                                 act->conf, &flow_err))
14771                                         return -rte_mtr_error_set(error,
14772                                         ENOTSUP,
14773                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14774                                         NULL, "cannot convert policy "
14775                                         "set tag action");
14776                                 if (!mhdr_res->actions_num)
14777                                         return -rte_mtr_error_set(error,
14778                                         ENOTSUP,
14779                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14780                                         NULL, "cannot find policy "
14781                                         "set tag action");
14782                                 /* create modify action if needed. */
14783                                 dev_flow.dv.group = 1;
14784                                 if (flow_dv_modify_hdr_resource_register
14785                                         (dev, mhdr_res, &dev_flow, &flow_err))
14786                                         return -rte_mtr_error_set(error,
14787                                         ENOTSUP,
14788                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14789                                         NULL, "cannot register policy "
14790                                         "set tag action");
14791                                 act_cnt->modify_hdr =
14792                                 dev_flow.handle->dvh.modify_hdr;
14793                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
14794                                 break;
14795                         }
14796                         case RTE_FLOW_ACTION_TYPE_DROP:
14797                         {
14798                                 struct mlx5_flow_mtr_mng *mtrmng =
14799                                                 priv->sh->mtrmng;
14800                                 struct mlx5_flow_tbl_data_entry *tbl_data;
14801
14802                                 /*
14803                                  * Create the drop table with
14804                                  * METER DROP level.
14805                                  */
14806                                 if (!mtrmng->drop_tbl[domain]) {
14807                                         mtrmng->drop_tbl[domain] =
14808                                         flow_dv_tbl_resource_get(dev,
14809                                         MLX5_FLOW_TABLE_LEVEL_METER,
14810                                         egress, transfer, false, NULL, 0,
14811                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
14812                                         if (!mtrmng->drop_tbl[domain])
14813                                                 return -rte_mtr_error_set
14814                                         (error, ENOTSUP,
14815                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14816                                         NULL,
14817                                         "Failed to create meter drop table");
14818                                 }
14819                                 tbl_data = container_of
14820                                 (mtrmng->drop_tbl[domain],
14821                                 struct mlx5_flow_tbl_data_entry, tbl);
14822                                 if (i < MLX5_MTR_RTE_COLORS) {
14823                                         act_cnt->dr_jump_action[domain] =
14824                                                 tbl_data->jump.action;
14825                                         act_cnt->fate_action =
14826                                                 MLX5_FLOW_FATE_DROP;
14827                                 }
14828                                 if (i == RTE_COLOR_RED)
14829                                         mtr_policy->dr_drop_action[domain] =
14830                                                 tbl_data->jump.action;
14831                                 action_flags |= MLX5_FLOW_ACTION_DROP;
14832                                 break;
14833                         }
14834                         case RTE_FLOW_ACTION_TYPE_QUEUE:
14835                         {
14836                                 if (i >= MLX5_MTR_RTE_COLORS)
14837                                         return -rte_mtr_error_set(error,
14838                                         ENOTSUP,
14839                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14840                                         NULL, "cannot create policy "
14841                                         "fate queue for this color");
14842                                 act_cnt->queue =
14843                                 ((const struct rte_flow_action_queue *)
14844                                         (act->conf))->index;
14845                                 act_cnt->fate_action =
14846                                         MLX5_FLOW_FATE_QUEUE;
14847                                 dev_flow.handle->fate_action =
14848                                         MLX5_FLOW_FATE_QUEUE;
14849                                 mtr_policy->is_queue = 1;
14850                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
14851                                 break;
14852                         }
14853                         case RTE_FLOW_ACTION_TYPE_RSS:
14854                         {
14855                                 int rss_size;
14856
14857                                 if (i >= MLX5_MTR_RTE_COLORS)
14858                                         return -rte_mtr_error_set(error,
14859                                           ENOTSUP,
14860                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14861                                           NULL,
14862                                           "cannot create policy "
14863                                           "rss action for this color");
14864                                 /*
14865                                  * Save RSS conf into policy struct
14866                                  * for translate stage.
14867                                  */
14868                                 rss_size = (int)rte_flow_conv
14869                                         (RTE_FLOW_CONV_OP_ACTION,
14870                                         NULL, 0, act, &flow_err);
14871                                 if (rss_size <= 0)
14872                                         return -rte_mtr_error_set(error,
14873                                           ENOTSUP,
14874                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14875                                           NULL, "failed to get the "
14876                                           "rss action struct size");
14877                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
14878                                                 rss_size, 0, SOCKET_ID_ANY);
14879                                 if (!act_cnt->rss)
14880                                         return -rte_mtr_error_set(error,
14881                                           ENOTSUP,
14882                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14883                                           NULL,
14884                                           "failed to allocate rss action memory");
14885                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
14886                                         act_cnt->rss, rss_size,
14887                                         act, &flow_err);
14888                                 if (ret < 0)
14889                                         return -rte_mtr_error_set(error,
14890                                           ENOTSUP,
14891                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14892                                           NULL, "failed to save "
14893                                           "rss action into policy struct");
14894                                 act_cnt->fate_action =
14895                                         MLX5_FLOW_FATE_SHARED_RSS;
14896                                 action_flags |= MLX5_FLOW_ACTION_RSS;
14897                                 break;
14898                         }
14899                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
14900                         {
14901                                 struct mlx5_flow_dv_port_id_action_resource
14902                                         port_id_resource;
14903                                 uint32_t port_id = 0;
14904
14905                                 if (i >= MLX5_MTR_RTE_COLORS)
14906                                         return -rte_mtr_error_set(error,
14907                                         ENOTSUP,
14908                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14909                                         NULL, "cannot create policy "
14910                                         "port action for this color");
14911                                 memset(&port_id_resource, 0,
14912                                         sizeof(port_id_resource));
14913                                 if (flow_dv_translate_action_port_id(dev, act,
14914                                                 &port_id, &flow_err))
14915                                         return -rte_mtr_error_set(error,
14916                                         ENOTSUP,
14917                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14918                                         NULL, "cannot translate "
14919                                         "policy port action");
14920                                 port_id_resource.port_id = port_id;
14921                                 if (flow_dv_port_id_action_resource_register
14922                                         (dev, &port_id_resource,
14923                                         &dev_flow, &flow_err))
14924                                         return -rte_mtr_error_set(error,
14925                                         ENOTSUP,
14926                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14927                                         NULL, "cannot setup "
14928                                         "policy port action");
14929                                 act_cnt->rix_port_id_action =
14930                                         dev_flow.handle->rix_port_id_action;
14931                                 act_cnt->fate_action =
14932                                         MLX5_FLOW_FATE_PORT_ID;
14933                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
14934                                 break;
14935                         }
14936                         case RTE_FLOW_ACTION_TYPE_JUMP:
14937                         {
14938                                 uint32_t jump_group = 0;
14939                                 uint32_t table = 0;
14940                                 struct mlx5_flow_tbl_data_entry *tbl_data;
14941                                 struct flow_grp_info grp_info = {
14942                                         .external = !!dev_flow.external,
14943                                         .transfer = !!transfer,
14944                                         .fdb_def_rule = !!priv->fdb_def_rule,
14945                                         .std_tbl_fix = 0,
14946                                         .skip_scale = dev_flow.skip_scale &
14947                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
14948                                 };
14949                                 struct mlx5_flow_meter_sub_policy *sub_policy =
14950                                 mtr_policy->sub_policys[domain][0];
14951
14952                                 if (i >= MLX5_MTR_RTE_COLORS)
14953                                         return -rte_mtr_error_set(error,
14954                                           ENOTSUP,
14955                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14956                                           NULL,
14957                                           "cannot create policy "
14958                                           "jump action for this color");
14959                                 jump_group =
14960                                 ((const struct rte_flow_action_jump *)
14961                                                         act->conf)->group;
14962                                 if (mlx5_flow_group_to_table(dev, NULL,
14963                                                        jump_group,
14964                                                        &table,
14965                                                        &grp_info, &flow_err))
14966                                         return -rte_mtr_error_set(error,
14967                                         ENOTSUP,
14968                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14969                                         NULL, "cannot setup "
14970                                         "policy jump action");
14971                                 sub_policy->jump_tbl[i] =
14972                                 flow_dv_tbl_resource_get(dev,
14973                                         table, egress,
14974                                         transfer,
14975                                         !!dev_flow.external,
14976                                         NULL, jump_group, 0,
14977                                         0, &flow_err);
14978                                 if (!sub_policy->jump_tbl[i])
14979                                         return -rte_mtr_error_set
14980                                                 (error, ENOTSUP,
14981                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14982                                         NULL,
14983                                         "cannot create jump action.");
14984                                 tbl_data = container_of
14985                                 (sub_policy->jump_tbl[i],
14986                                 struct mlx5_flow_tbl_data_entry, tbl);
14987                                 act_cnt->dr_jump_action[domain] =
14988                                         tbl_data->jump.action;
14989                                 act_cnt->fate_action =
14990                                         MLX5_FLOW_FATE_JUMP;
14991                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
14992                                 break;
14993                         }
14994                         default:
14995                                 return -rte_mtr_error_set(error, ENOTSUP,
14996                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14997                                           NULL, "action type not supported");
14998                         }
14999                 }
15000         }
15001         return 0;
15002 }
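
/*
 * Shape of the actions[] argument consumed above, as handed over by
 * the rte_mtr policy API: one END-terminated action list per color
 * (a NULL list for a color is skipped). The green/red split is only
 * an example policy:
 *
 *   struct rte_flow_action green[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_action red[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   const struct rte_flow_action *acts[RTE_COLORS] = {
 *           [RTE_COLOR_GREEN] = green,
 *           [RTE_COLOR_RED] = red,
 *   };
 */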
15003
15004 /**
15005  * Create policy actions for all meter domains, lock free,
15006  * (mutex should be acquired by caller).
15007  * Dispatcher for the per-domain action creation.
15008  *
15009  * @param[in] dev
15010  *   Pointer to the Ethernet device structure.
15011  * @param[in] mtr_policy
15012  *   Meter policy struct.
15013  * @param[in] actions
15014  *   Per-color array of action specifications used to create meter actions.
15015  * @param[out] error
15016  *   Perform verbose error reporting if not NULL. Initialized in case of
15017  *   error only.
15018  *
15019  * @return
15020  *   0 on success, otherwise negative errno value.
15021  */
15022 static int
15023 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15024                       struct mlx5_flow_meter_policy *mtr_policy,
15025                       const struct rte_flow_action *actions[RTE_COLORS],
15026                       struct rte_mtr_error *error)
15027 {
15028         int ret, i;
15029         uint16_t sub_policy_num;
15030
15031         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15032                 sub_policy_num = (mtr_policy->sub_policy_num >>
15033                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15034                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15035                 if (sub_policy_num) {
15036                         ret = __flow_dv_create_domain_policy_acts(dev,
15037                                 mtr_policy, actions,
15038                                 (enum mlx5_meter_domain)i, error);
15039                         if (ret)
15040                                 return ret;
15041                 }
15042         }
15043         return 0;
15044 }
15045
15046 /**
15047  * Query a DV flow rule for its statistics via DevX.
15048  *
15049  * @param[in] dev
15050  *   Pointer to Ethernet device.
15051  * @param[in] cnt_idx
15052  *   Index to the flow counter.
15053  * @param[out] data
15054  *   Data retrieved by the query.
15055  * @param[out] error
15056  *   Perform verbose error reporting if not NULL.
15057  *
15058  * @return
15059  *   0 on success, a negative errno value otherwise and rte_errno is set.
15060  */
15061 static int
15062 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15063                     struct rte_flow_error *error)
15064 {
15065         struct mlx5_priv *priv = dev->data->dev_private;
15066         struct rte_flow_query_count *qc = data;
15067
15068         if (!priv->config.devx)
15069                 return rte_flow_error_set(error, ENOTSUP,
15070                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15071                                           NULL,
15072                                           "counters are not supported");
15073         if (cnt_idx) {
15074                 uint64_t pkts, bytes;
15075                 struct mlx5_flow_counter *cnt;
15076                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15077
15078                 if (err)
15079                         return rte_flow_error_set(error, -err,
15080                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15081                                         NULL, "cannot read counters");
15082                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15083                 qc->hits_set = 1;
15084                 qc->bytes_set = 1;
15085                 qc->hits = pkts - cnt->hits;
15086                 qc->bytes = bytes - cnt->bytes;
15087                 if (qc->reset) {
15088                         cnt->hits = pkts;
15089                         cnt->bytes = bytes;
15090                 }
15091                 return 0;
15092         }
15093         return rte_flow_error_set(error, EINVAL,
15094                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15095                                   NULL,
15096                                   "counters are not available");
15097 }
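
/*
 * Counter read sketch through rte_flow_query(); flow and port_id are
 * application-owned. Setting .reset makes the driver re-baseline
 * cnt->hits/cnt->bytes as done above:
 *
 *   struct rte_flow_query_count qc = { .reset = 1 };
 *   struct rte_flow_action count_action = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *   };
 *
 *   if (rte_flow_query(port_id, flow, &count_action, &qc,
 *                      &flow_err) == 0 && qc.hits_set)
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                  qc.hits, qc.bytes);
 */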
15098
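/**
 * Query an indirect action, lock free,
 * (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] handle
 *   The indirect action object handle to be queried.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */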
15099 static int
15100 flow_dv_action_query(struct rte_eth_dev *dev,
15101                      const struct rte_flow_action_handle *handle, void *data,
15102                      struct rte_flow_error *error)
15103 {
15104         struct mlx5_age_param *age_param;
15105         struct rte_flow_query_age *resp;
15106         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15107         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15108         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15109         struct mlx5_priv *priv = dev->data->dev_private;
15110         struct mlx5_aso_ct_action *ct;
15111         uint16_t owner;
15112         uint32_t dev_idx;
15113
15114         switch (type) {
15115         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15116                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15117                 resp = data;
15118                 resp->aged = __atomic_load_n(&age_param->state,
15119                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15120                                                                           1 : 0;
15121                 resp->sec_since_last_hit_valid = !resp->aged;
15122                 if (resp->sec_since_last_hit_valid)
15123                         resp->sec_since_last_hit = __atomic_load_n
15124                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15125                 return 0;
15126         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15127                 return flow_dv_query_count(dev, idx, data, error);
15128         case MLX5_INDIRECT_ACTION_TYPE_CT:
15129                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15130                 if (owner != PORT_ID(priv))
15131                         return rte_flow_error_set(error, EACCES,
15132                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15133                                         NULL,
15134                                         "CT object owned by another port");
15135                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15136                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15137                 MLX5_ASSERT(ct);
15138                 if (!ct->refcnt)
15139                         return rte_flow_error_set(error, EFAULT,
15140                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15141                                         NULL,
15142                                         "CT object is inactive");
15143                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15144                                                         ct->peer;
15145                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15146                                                         ct->is_original;
15147                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15148                         return rte_flow_error_set(error, EIO,
15149                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15150                                         NULL,
15151                                         "Failed to query CT context");
15152                 return 0;
15153         default:
15154                 return rte_flow_error_set(error, ENOTSUP,
15155                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15156                                           "action type query not supported");
15157         }
15158 }
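/*
 * Illustrative usage (assumed application code, not part of this driver):
 * this handler is reached through the generic rte_flow API, e.g. querying
 * an indirect AGE action:
 *
 *   struct rte_flow_query_age age;
 *   struct rte_flow_error err;
 *
 *   if (!rte_flow_action_handle_query(port_id, handle, &age, &err) &&
 *       age.sec_since_last_hit_valid)
 *           printf("flow idle for %u seconds\n", age.sec_since_last_hit);
 *
 * The action type is recovered from the upper bits of the handle itself,
 * so the dispatch above needs no extra lookup.
 */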
15159
15160 /**
15161  * Query a flow rule AGE action for aging information.
15162  *
15163  * @param[in] dev
15164  *   Pointer to Ethernet device.
15165  * @param[in] flow
15166  *   Pointer to the flow rule.
15167  * @param[out] data
15168  *   Data retrieved by the query.
15169  * @param[out] error
15170  *   Perform verbose error reporting if not NULL.
15171  *
15172  * @return
15173  *   0 on success, a negative errno value otherwise and rte_errno is set.
15174  */
15175 static int
15176 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15177                   void *data, struct rte_flow_error *error)
15178 {
15179         struct rte_flow_query_age *resp = data;
15180         struct mlx5_age_param *age_param;
15181
15182         if (flow->age) {
15183                 struct mlx5_aso_age_action *act =
15184                                      flow_aso_age_get_by_idx(dev, flow->age);
15185
15186                 age_param = &act->age_params;
15187         } else if (flow->counter) {
15188                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15189
15190                 if (!age_param || !age_param->timeout)
15191                         return rte_flow_error_set
15192                                         (error, EINVAL,
15193                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15194                                          NULL, "cannot read age data");
15195         } else {
15196                 return rte_flow_error_set(error, EINVAL,
15197                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15198                                           NULL, "age data not available");
15199         }
15200         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15201                                      AGE_TMOUT ? 1 : 0;
15202         resp->sec_since_last_hit_valid = !resp->aged;
15203         if (resp->sec_since_last_hit_valid)
15204                 resp->sec_since_last_hit = __atomic_load_n
15205                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15206         return 0;
15207 }
15208
15209 /**
15210  * Query a flow.
15211  *
15212  * @see rte_flow_query()
15213  * @see rte_flow_ops
15214  */
15215 static int
15216 flow_dv_query(struct rte_eth_dev *dev,
15217               struct rte_flow *flow,
15218               const struct rte_flow_action *actions,
15219               void *data,
15220               struct rte_flow_error *error)
15221 {
15222         int ret = -EINVAL;
15223
15224         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15225                 switch (actions->type) {
15226                 case RTE_FLOW_ACTION_TYPE_VOID:
15227                         break;
15228                 case RTE_FLOW_ACTION_TYPE_COUNT:
15229                         ret = flow_dv_query_count(dev, flow->counter, data,
15230                                                   error);
15231                         break;
15232                 case RTE_FLOW_ACTION_TYPE_AGE:
15233                         ret = flow_dv_query_age(dev, flow, data, error);
15234                         break;
15235                 default:
15236                         return rte_flow_error_set(error, ENOTSUP,
15237                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15238                                                   actions,
15239                                                   "action not supported");
15240                 }
15241         }
15242         return ret;
15243 }
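/*
 * Illustrative usage (assumed application code): flow_dv_query() backs
 * rte_flow_query(), which takes the action type to query as a selector,
 * e.g. reading and clearing the COUNT statistics of a flow rule:
 *
 *   struct rte_flow_query_count qc = { .reset = 1 };
 *   struct rte_flow_action count = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *   };
 *   struct rte_flow_error err;
 *
 *   if (!rte_flow_query(port_id, flow, &count, &qc, &err))
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                  qc.hits, qc.bytes);
 */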
15244
15245 /**
15246  * Destroy the meter table set.
15247  * Lock free (mutex should be acquired by the caller).
15248  *
15249  * @param[in] dev
15250  *   Pointer to Ethernet device.
15251  * @param[in] fm
15252  *   Meter information table.
15253  */
15254 static void
15255 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15256                         struct mlx5_flow_meter_info *fm)
15257 {
15258         struct mlx5_priv *priv = dev->data->dev_private;
15259         int i;
15260
15261         if (!fm || !priv->config.dv_flow_en)
15262                 return;
15263         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15264                 if (fm->drop_rule[i]) {
15265                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15266                         fm->drop_rule[i] = NULL;
15267                 }
15268         }
15269 }
15270
15271 static void
15272 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15273 {
15274         struct mlx5_priv *priv = dev->data->dev_private;
15275         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15276         struct mlx5_flow_tbl_data_entry *tbl;
15277         int i, j;
15278
15279         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15280                 if (mtrmng->def_rule[i]) {
15281                         claim_zero(mlx5_flow_os_destroy_flow
15282                                         (mtrmng->def_rule[i]));
15283                         mtrmng->def_rule[i] = NULL;
15284                 }
15285                 if (mtrmng->def_matcher[i]) {
15286                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15287                                 struct mlx5_flow_tbl_data_entry, tbl);
15288                         mlx5_cache_unregister(&tbl->matchers,
15289                                       &mtrmng->def_matcher[i]->entry);
15290                         mtrmng->def_matcher[i] = NULL;
15291                 }
15292                 for (j = 0; j < MLX5_REG_BITS; j++) {
15293                         if (mtrmng->drop_matcher[i][j]) {
15294                                 tbl =
15295                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15296                                              struct mlx5_flow_tbl_data_entry,
15297                                              tbl);
15298                                 mlx5_cache_unregister(&tbl->matchers,
15299                                         &mtrmng->drop_matcher[i][j]->entry);
15300                                 mtrmng->drop_matcher[i][j] = NULL;
15301                         }
15302                 }
15303                 if (mtrmng->drop_tbl[i]) {
15304                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15305                                 mtrmng->drop_tbl[i]);
15306                         mtrmng->drop_tbl[i] = NULL;
15307                 }
15308         }
15309 }
15310
15311 /* Number of meter flow actions, count and jump or count and drop. */
15312 #define METER_ACTIONS 2
15313
15314 static void
15315 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15316                               enum mlx5_meter_domain domain)
15317 {
15318         struct mlx5_priv *priv = dev->data->dev_private;
15319         struct mlx5_flow_meter_def_policy *def_policy =
15320                         priv->sh->mtrmng->def_policy[domain];
15321
15322         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15323         mlx5_free(def_policy);
15324         priv->sh->mtrmng->def_policy[domain] = NULL;
15325 }
15326
15327 /**
15328  * Destroy the default policy table set.
15329  *
15330  * @param[in] dev
15331  *   Pointer to Ethernet device.
15332  */
15333 static void
15334 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15335 {
15336         struct mlx5_priv *priv = dev->data->dev_private;
15337         int i;
15338
15339         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15340                 if (priv->sh->mtrmng->def_policy[i])
15341                         __flow_dv_destroy_domain_def_policy(dev,
15342                                         (enum mlx5_meter_domain)i);
15343         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15344 }
15345
15346 static int
15347 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15348                         uint32_t color_reg_c_idx,
15349                         enum rte_color color, void *matcher_object,
15350                         int actions_n, void *actions,
15351                         bool is_default_policy, void **rule,
15352                         const struct rte_flow_attr *attr)
15353 {
15354         int ret;
15355         struct mlx5_flow_dv_match_params value = {
15356                 .size = sizeof(value.buf) -
15357                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15358         };
15359         struct mlx5_flow_dv_match_params matcher = {
15360                 .size = sizeof(matcher.buf) -
15361                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15362         };
15363         struct mlx5_priv *priv = dev->data->dev_private;
15364
15365         if (!is_default_policy && (priv->representor || priv->master)) {
15366                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15367                                                    value.buf, NULL, attr)) {
15368                         DRV_LOG(ERR,
15369                         "Failed to create meter policy flow with port.");
15370                         return -1;
15371                 }
15372         }
15373         flow_dv_match_meta_reg(matcher.buf, value.buf,
15374                                 (enum modify_reg)color_reg_c_idx,
15375                                 rte_col_2_mlx5_col(color),
15376                                 UINT32_MAX);
15377         ret = mlx5_flow_os_create_flow(matcher_object,
15378                         (void *)&value, actions_n, actions, rule);
15379         if (ret) {
15380                 DRV_LOG(ERR, "Failed to create meter policy flow.");
15381                 return -1;
15382         }
15383         return 0;
15384 }
15385
15386 static int
15387 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
15388                         uint32_t color_reg_c_idx,
15389                         uint16_t priority,
15390                         struct mlx5_flow_meter_sub_policy *sub_policy,
15391                         const struct rte_flow_attr *attr,
15392                         bool is_default_policy,
15393                         struct rte_flow_error *error)
15394 {
15395         struct mlx5_cache_entry *entry;
15396         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
15397         struct mlx5_flow_dv_matcher matcher = {
15398                 .mask = {
15399                         .size = sizeof(matcher.mask.buf) -
15400                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15401                 },
15402                 .tbl = tbl_rsc,
15403         };
15404         struct mlx5_flow_dv_match_params value = {
15405                 .size = sizeof(value.buf) -
15406                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15407         };
15408         struct mlx5_flow_cb_ctx ctx = {
15409                 .error = error,
15410                 .data = &matcher,
15411         };
15412         struct mlx5_flow_tbl_data_entry *tbl_data;
15413         struct mlx5_priv *priv = dev->data->dev_private;
15414         uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
15415
15416         if (!is_default_policy && (priv->representor || priv->master)) {
15417                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
15418                                                    value.buf, NULL, attr)) {
15419                         DRV_LOG(ERR,
15420                         "Failed to register meter drop matcher with port.");
15421                         return -1;
15422                 }
15423         }
15424         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
15425         if (priority < RTE_COLOR_RED)
15426                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15427                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
15428         matcher.priority = priority;
15429         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
15430                                         matcher.mask.size);
15431         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15432         if (!entry) {
15433                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
15434                 return -1;
15435         }
15436         sub_policy->color_matcher[priority] =
15437                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
15438         return 0;
15439 }
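/*
 * A worked sketch of the color matching set up above for priorities below
 * RTE_COLOR_RED (MLX5_MTR_COLOR_BITS == 2 is an illustrative assumption):
 *
 *   color_mask = (UINT32_C(1) << 2) - 1 = 0x3
 *
 * The matcher therefore keys only on the low color bits of the meter
 * register, while __flow_dv_create_policy_flow() supplies the concrete
 * color value through rte_col_2_mlx5_col(color) with a UINT32_MAX mask.
 */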
15440
15441 /**
15442  * Create the policy rules per domain.
15443  *
15444  * @param[in] dev
15445  *   Pointer to Ethernet device.
15446  * @param[in] sub_policy
15447  *   Pointer to the sub policy table.
15448  * @param[in] egress
15449  *   Direction of the table.
15450  * @param[in] transfer
15451  *   E-Switch or NIC flow.
15452  * @param[in] acts
15453  *   Pointer to policy action list per color.
15454  *
15455  * @return
15456  *   0 on success, -1 otherwise.
15457  */
15458 static int
15459 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
15460                 struct mlx5_flow_meter_sub_policy *sub_policy,
15461                 uint8_t egress, uint8_t transfer, bool is_default_policy,
15462                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
15463 {
15464         struct rte_flow_error flow_err;
15465         uint32_t color_reg_c_idx;
15466         struct rte_flow_attr attr = {
15467                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
15468                 .priority = 0,
15469                 .ingress = 0,
15470                 .egress = !!egress,
15471                 .transfer = !!transfer,
15472                 .reserved = 0,
15473         };
15474         int i;
15475         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
15476
15477         if (ret < 0)
15478                 return -1;
15479         /* Create policy table with POLICY level. */
15480         if (!sub_policy->tbl_rsc)
15481                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
15482                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
15483                                 egress, transfer, false, NULL, 0, 0,
15484                                 sub_policy->idx, &flow_err);
15485         if (!sub_policy->tbl_rsc) {
15486                 DRV_LOG(ERR,
15487                         "Failed to create meter sub policy table.");
15488                 return -1;
15489         }
15490         /* Prepare matchers. */
15491         color_reg_c_idx = ret;
15492         for (i = 0; i < RTE_COLORS; i++) {
15493                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
15494                         continue;
15495                 attr.priority = i;
15496                 if (!sub_policy->color_matcher[i]) {
15497                         /* Create matchers for Color. */
15498                         if (__flow_dv_create_policy_matcher(dev,
15499                                 color_reg_c_idx, i, sub_policy,
15500                                 &attr, is_default_policy, &flow_err))
15501                                 return -1;
15502                 }
15503                 /* Create flow, matching color. */
15504                 if (acts[i].actions_n)
15505                         if (__flow_dv_create_policy_flow(dev,
15506                                 color_reg_c_idx, (enum rte_color)i,
15507                                 sub_policy->color_matcher[i]->matcher_object,
15508                                 acts[i].actions_n,
15509                                 acts[i].dv_actions,
15510                                 is_default_policy,
15511                                 &sub_policy->color_rule[i],
15512                                 &attr))
15513                                 return -1;
15514         }
15515         return 0;
15516 }
15517
15518 static int
15519 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
15520                         struct mlx5_flow_meter_policy *mtr_policy,
15521                         struct mlx5_flow_meter_sub_policy *sub_policy,
15522                         uint32_t domain)
15523 {
15524         struct mlx5_priv *priv = dev->data->dev_private;
15525         struct mlx5_meter_policy_acts acts[RTE_COLORS];
15526         struct mlx5_flow_dv_tag_resource *tag;
15527         struct mlx5_flow_dv_port_id_action_resource *port_action;
15528         struct mlx5_hrxq *hrxq;
15529         uint8_t egress, transfer;
15530         int i;
15531
15532         for (i = 0; i < RTE_COLORS; i++) {
15533                 acts[i].actions_n = 0;
15534                 if (i == RTE_COLOR_YELLOW)
15535                         continue;
15536                 if (i == RTE_COLOR_RED) {
15537                         /* Only support drop on red. */
15538                         acts[i].dv_actions[0] =
15539                         mtr_policy->dr_drop_action[domain];
15540                         acts[i].actions_n = 1;
15541                         continue;
15542                 }
15543                 if (mtr_policy->act_cnt[i].rix_mark) {
15544                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
15545                                         mtr_policy->act_cnt[i].rix_mark);
15546                         if (!tag) {
15547                                 DRV_LOG(ERR, "Failed to find "
15548                                 "mark action for policy.");
15549                                 return -1;
15550                         }
15551                         acts[i].dv_actions[acts[i].actions_n] =
15552                                                 tag->action;
15553                         acts[i].actions_n++;
15554                 }
15555                 if (mtr_policy->act_cnt[i].modify_hdr) {
15556                         acts[i].dv_actions[acts[i].actions_n] =
15557                         mtr_policy->act_cnt[i].modify_hdr->action;
15558                         acts[i].actions_n++;
15559                 }
15560                 if (mtr_policy->act_cnt[i].fate_action) {
15561                         switch (mtr_policy->act_cnt[i].fate_action) {
15562                         case MLX5_FLOW_FATE_PORT_ID:
15563                                 port_action = mlx5_ipool_get
15564                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
15565                                 mtr_policy->act_cnt[i].rix_port_id_action);
15566                                 if (!port_action) {
15567                                         DRV_LOG(ERR, "Failed to find "
15568                                                 "port action for policy.");
15569                                         return -1;
15570                                 }
15571                                 acts[i].dv_actions[acts[i].actions_n] =
15572                                 port_action->action;
15573                                 acts[i].actions_n++;
15574                                 break;
15575                         case MLX5_FLOW_FATE_DROP:
15576                         case MLX5_FLOW_FATE_JUMP:
15577                                 acts[i].dv_actions[acts[i].actions_n] =
15578                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
15579                                 acts[i].actions_n++;
15580                                 break;
15581                         case MLX5_FLOW_FATE_SHARED_RSS:
15582                         case MLX5_FLOW_FATE_QUEUE:
15583                                 hrxq = mlx5_ipool_get
15584                                 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
15585                                 sub_policy->rix_hrxq[i]);
15586                                 if (!hrxq) {
15587                                         DRV_LOG(ERR, "Failed to find "
15588                                                 "queue action for policy.");
15589                                         return -1;
15590                                 }
15591                                 acts[i].dv_actions[acts[i].actions_n] =
15592                                 hrxq->action;
15593                                 acts[i].actions_n++;
15594                                 break;
15595                         default:
15596                                 /* Queue action: nothing to do here. */
15597                                 break;
15598                         }
15599                 }
15600         }
15601         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15602         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15603         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
15604                                 egress, transfer, false, acts)) {
15605                 DRV_LOG(ERR,
15606                 "Failed to create policy rules per domain.");
15607                 return -1;
15608         }
15609         return 0;
15610 }
15611
15612 /**
15613  * Create the policy rules.
15614  *
15615  * @param[in] dev
15616  *   Pointer to Ethernet device.
15617  * @param[in,out] mtr_policy
15618  *   Pointer to meter policy table.
15619  *
15620  * @return
15621  *   0 on success, -1 otherwise.
15622  */
15623 static int
15624 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
15625                              struct mlx5_flow_meter_policy *mtr_policy)
15626 {
15627         int i;
15628         uint16_t sub_policy_num;
15629
15630         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15631                 sub_policy_num = (mtr_policy->sub_policy_num >>
15632                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15633                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15634                 if (!sub_policy_num)
15635                         continue;
15636                 /* Prepare actions list and create policy rules. */
15637                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
15638                         mtr_policy->sub_policys[i][0], i)) {
15639                         DRV_LOG(ERR,
15640                         "Failed to create policy action list per domain.");
15641                         return -1;
15642                 }
15643         }
15644         return 0;
15645 }
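/*
 * A worked example of the sub_policy_num packing used above (the field
 * widths are illustrative assumptions): each domain owns
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits of the word. With a shift of 3 and a
 * mask of 0x7, two sub policies in domain 0 and one in domain 2 pack as:
 *
 *   sub_policy_num = (2 << (3 * 0)) | (1 << (3 * 2)) = 0x42
 *
 * and the count for domain i is recovered with
 *   (sub_policy_num >> (3 * i)) & 0x7.
 */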
15646
15647 static int
15648 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
15649 {
15650         struct mlx5_priv *priv = dev->data->dev_private;
15651         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15652         struct mlx5_flow_meter_def_policy *def_policy;
15653         struct mlx5_flow_tbl_resource *jump_tbl;
15654         struct mlx5_flow_tbl_data_entry *tbl_data;
15655         uint8_t egress, transfer;
15656         struct rte_flow_error error;
15657         struct mlx5_meter_policy_acts acts[RTE_COLORS];
15658         int ret;
15659
15660         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15661         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15662         def_policy = mtrmng->def_policy[domain];
15663         if (!def_policy) {
15664                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
15665                         sizeof(struct mlx5_flow_meter_def_policy),
15666                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
15667                 if (!def_policy) {
15668                         DRV_LOG(ERR, "Failed to alloc "
15669                                         "default policy table.");
15670                         goto def_policy_error;
15671                 }
15672                 mtrmng->def_policy[domain] = def_policy;
15673                 /* Create the meter suffix table with SUFFIX level. */
15674                 jump_tbl = flow_dv_tbl_resource_get(dev,
15675                                 MLX5_FLOW_TABLE_LEVEL_METER,
15676                                 egress, transfer, false, NULL, 0,
15677                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
15678                 if (!jump_tbl) {
15679                         DRV_LOG(ERR,
15680                                 "Failed to create meter suffix table.");
15681                         goto def_policy_error;
15682                 }
15683                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
15684                 tbl_data = container_of(jump_tbl,
15685                                 struct mlx5_flow_tbl_data_entry, tbl);
15686                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
15687                                                 tbl_data->jump.action;
15688                 acts[RTE_COLOR_GREEN].dv_actions[0] =
15689                                                 tbl_data->jump.action;
15690                 acts[RTE_COLOR_GREEN].actions_n = 1;
15691                 /* Create jump action to the drop table. */
15692                 if (!mtrmng->drop_tbl[domain]) {
15693                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
15694                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
15695                                 egress, transfer, false, NULL, 0,
15696                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
15697                         if (!mtrmng->drop_tbl[domain]) {
15698                                 DRV_LOG(ERR, "Failed to create "
15699                                 "meter drop table for default policy.");
15700                                 goto def_policy_error;
15701                         }
15702                 }
15703                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15704                                 struct mlx5_flow_tbl_data_entry, tbl);
15705                 def_policy->dr_jump_action[RTE_COLOR_RED] =
15706                                                 tbl_data->jump.action;
15707                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
15708                 acts[RTE_COLOR_RED].actions_n = 1;
15709                 /* Create default policy rules. */
15710                 ret = __flow_dv_create_domain_policy_rules(dev,
15711                                         &def_policy->sub_policy,
15712                                         egress, transfer, true, acts);
15713                 if (ret) {
15714                         DRV_LOG(ERR, "Failed to create "
15715                                 "default policy rules.");
15716                         goto def_policy_error;
15717                 }
15718         }
15719         return 0;
15720 def_policy_error:
15721         __flow_dv_destroy_domain_def_policy(dev,
15722                         (enum mlx5_meter_domain)domain);
15723         return -1;
15724 }
15725
15726 /**
15727  * Create the default policy table set.
15728  *
15729  * @param[in] dev
15730  *   Pointer to Ethernet device.
15731  * @return
15732  *   0 on success, -1 otherwise.
15733  */
15734 static int
15735 flow_dv_create_def_policy(struct rte_eth_dev *dev)
15736 {
15737         struct mlx5_priv *priv = dev->data->dev_private;
15738         int i;
15739
15740         /* Non-termination policy table. */
15741         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15742                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
15743                         continue;
15744                 if (__flow_dv_create_domain_def_policy(dev, i)) {
15745                         DRV_LOG(ERR,
15746                         "Failed to create default policy");
15747                         return -1;
15748                 }
15749         }
15750         return 0;
15751 }
15752
15753 /**
15754  * Create the needed meter tables.
15755  * Lock free (mutex should be acquired by the caller).
15756  *
15757  * @param[in] dev
15758  *   Pointer to Ethernet device.
15759  * @param[in] fm
15760  *   Meter information table.
15761  * @param[in] mtr_idx
15762  *   Meter index.
15763  * @param[in] domain_bitmap
15764  *   Domain bitmap.
15765  * @return
15766  *   0 on success, -1 otherwise.
15767  */
15768 static int
15769 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
15770                         struct mlx5_flow_meter_info *fm,
15771                         uint32_t mtr_idx,
15772                         uint8_t domain_bitmap)
15773 {
15774         struct mlx5_priv *priv = dev->data->dev_private;
15775         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15776         struct rte_flow_error error;
15777         struct mlx5_flow_tbl_data_entry *tbl_data;
15778         uint8_t egress, transfer;
15779         void *actions[METER_ACTIONS];
15780         int domain, ret, i;
15781         struct mlx5_flow_counter *cnt;
15782         struct mlx5_flow_dv_match_params value = {
15783                 .size = sizeof(value.buf) -
15784                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15785         };
15786         struct mlx5_flow_dv_match_params matcher_para = {
15787                 .size = sizeof(matcher_para.buf) -
15788                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15789         };
15790         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
15791                                                      0, &error);
15792         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
15793         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
15794         struct mlx5_cache_entry *entry;
15795         struct mlx5_flow_dv_matcher matcher = {
15796                 .mask = {
15797                         .size = sizeof(matcher.mask.buf) -
15798                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15799                 },
15800         };
15801         struct mlx5_flow_dv_matcher *drop_matcher;
15802         struct mlx5_flow_cb_ctx ctx = {
15803                 .error = &error,
15804                 .data = &matcher,
15805         };
15806
15807         if (!priv->mtr_en || mtr_id_reg_c < 0) {
15808                 rte_errno = ENOTSUP;
15809                 return -1;
15810         }
15811         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
15812                 if (!(domain_bitmap & (1 << domain)) ||
15813                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
15814                         continue;
15815                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15816                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15817                 /* Create the drop table with METER DROP level. */
15818                 if (!mtrmng->drop_tbl[domain]) {
15819                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
15820                                         MLX5_FLOW_TABLE_LEVEL_METER,
15821                                         egress, transfer, false, NULL, 0,
15822                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
15823                         if (!mtrmng->drop_tbl[domain]) {
15824                                 DRV_LOG(ERR, "Failed to create meter drop table.");
15825                                 goto policy_error;
15826                         }
15827                 }
15828                 /* Create default matcher in drop table. */
15829                 matcher.tbl = mtrmng->drop_tbl[domain];
15830                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15831                                 struct mlx5_flow_tbl_data_entry, tbl);
15832                 if (!mtrmng->def_matcher[domain]) {
15833                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15834                                        (enum modify_reg)mtr_id_reg_c,
15835                                        0, 0);
15836                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
15837                         matcher.crc = rte_raw_cksum
15838                                         ((const void *)matcher.mask.buf,
15839                                         matcher.mask.size);
15840                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15841                         if (!entry) {
15842                                 DRV_LOG(ERR, "Failed to register meter "
15843                                 "drop default matcher.");
15844                                 goto policy_error;
15845                         }
15846                         mtrmng->def_matcher[domain] = container_of(entry,
15847                         struct mlx5_flow_dv_matcher, entry);
15848                 }
15849                 /* Create default rule in drop table. */
15850                 if (!mtrmng->def_rule[domain]) {
15851                         i = 0;
15852                         actions[i++] = priv->sh->dr_drop_action;
15853                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15854                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
15855                         ret = mlx5_flow_os_create_flow
15856                                 (mtrmng->def_matcher[domain]->matcher_object,
15857                                 (void *)&value, i, actions,
15858                                 &mtrmng->def_rule[domain]);
15859                         if (ret) {
15860                                 DRV_LOG(ERR, "Failed to create meter "
15861                                 "default drop rule for drop table.");
15862                                 goto policy_error;
15863                         }
15864                 }
15865                 if (!fm->drop_cnt)
15866                         continue;
15867                 MLX5_ASSERT(mtrmng->max_mtr_bits);
15868                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
15869                         /* Create matchers for Drop. */
15870                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15871                                         (enum modify_reg)mtr_id_reg_c, 0,
15872                                         (mtr_id_mask << mtr_id_offset));
15873                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
15874                         matcher.crc = rte_raw_cksum
15875                                         ((const void *)matcher.mask.buf,
15876                                         matcher.mask.size);
15877                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15878                         if (!entry) {
15879                                 DRV_LOG(ERR,
15880                                 "Failed to register meter drop matcher.");
15881                                 goto policy_error;
15882                         }
15883                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
15884                                 container_of(entry, struct mlx5_flow_dv_matcher,
15885                                              entry);
15886                 }
15887                 drop_matcher =
15888                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
15889                 /* Create drop rule, matching meter_id only. */
15890                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15891                                 (enum modify_reg)mtr_id_reg_c,
15892                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
15893                 i = 0;
15894                 cnt = flow_dv_counter_get_by_idx(dev,
15895                                         fm->drop_cnt, NULL);
15896                 actions[i++] = cnt->action;
15897                 actions[i++] = priv->sh->dr_drop_action;
15898                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
15899                                                (void *)&value, i, actions,
15900                                                &fm->drop_rule[domain]);
15901                 if (ret) {
15902                         DRV_LOG(ERR, "Failed to create meter "
15903                                 "drop rule for drop table.");
15904                         goto policy_error;
15905                 }
15906         }
15907         return 0;
15908 policy_error:
15909         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15910                 if (fm->drop_rule[i]) {
15911                         claim_zero(mlx5_flow_os_destroy_flow
15912                                 (fm->drop_rule[i]));
15913                         fm->drop_rule[i] = NULL;
15914                 }
15915         }
15916         return -1;
15917 }
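/*
 * Sketch of the meter register layout assumed above (bit widths are
 * illustrative): with priv->mtr_reg_share set, the color occupies the low
 * MLX5_MTR_COLOR_BITS of the register and the meter index sits above it.
 * Taking MLX5_MTR_COLOR_BITS == 2, max_mtr_bits == 8 and mtr_idx == 5,
 * the drop rule created above matches:
 *
 *   value = 5 << 2                = 0x14
 *   mask  = ((1u << 8) - 1) << 2  = 0x3fc
 *
 * i.e. it keys on the meter index regardless of the packet color bits.
 */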
15918
15919 /**
15920  * Find or create the sub policy table for the prefix table with RSS.
15921  *
15922  * @param[in] dev
15923  *   Pointer to Ethernet device.
15924  * @param[in] mtr_policy
15925  *   Pointer to meter policy table.
15926  * @param[in] rss_desc
15927  *   Pointer to the RSS descriptors, one per color.
15928  * @return
15929  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
15930  */
15931 static struct mlx5_flow_meter_sub_policy *
15932 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
15933                 struct mlx5_flow_meter_policy *mtr_policy,
15934                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
15935 {
15936         struct mlx5_priv *priv = dev->data->dev_private;
15937         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
15938         uint32_t sub_policy_idx = 0;
15939         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
15940         uint32_t i, j;
15941         struct mlx5_hrxq *hrxq;
15942         struct mlx5_flow_handle dh;
15943         struct mlx5_meter_policy_action_container *act_cnt;
15944         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
15945         uint16_t sub_policy_num;
15946
15947         rte_spinlock_lock(&mtr_policy->sl);
15948         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15949                 if (!rss_desc[i])
15950                         continue;
15951                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
15952                 if (!hrxq_idx[i]) {
15953                         rte_spinlock_unlock(&mtr_policy->sl);
15954                         return NULL;
15955                 }
15956         }
15957         sub_policy_num = (mtr_policy->sub_policy_num >>
15958                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15959                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15960         for (i = 0; i < sub_policy_num; i++) {
15962                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
15963                         if (rss_desc[j] &&
15964                                 hrxq_idx[j] !=
15965                         mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
15966                                 break;
15967                 }
15968                 if (j >= MLX5_MTR_RTE_COLORS) {
15969                         /*
15970                          * Found the sub policy table with
15971                          * the same queue per color.
15972                          */
15973                         rte_spinlock_unlock(&mtr_policy->sl);
15974                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
15975                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
15976                         return mtr_policy->sub_policys[domain][i];
15977                 }
15978         }
15979         /* Create sub policy. */
15980         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
15981                 /* Reuse the first dummy sub_policy. */
15982                 sub_policy = mtr_policy->sub_policys[domain][0];
15983                 sub_policy_idx = sub_policy->idx;
15984         } else {
15985                 sub_policy = mlx5_ipool_zmalloc
15986                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15987                                 &sub_policy_idx);
15988                 if (!sub_policy ||
15989                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
15990                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
15991                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
15992                         goto rss_sub_policy_error;
15993                 }
15994                 sub_policy->idx = sub_policy_idx;
15995                 sub_policy->main_policy = mtr_policy;
15996         }
15997         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15998                 if (!rss_desc[i])
15999                         continue;
16000                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16001                 /*
16002                  * Overwrite the last action from
16003                  * RSS action to Queue action.
16004                  */
16005                 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16006                               hrxq_idx[i]);
16007                 if (!hrxq) {
16008                         DRV_LOG(ERR, "Failed to create policy hrxq");
16009                         goto rss_sub_policy_error;
16010                 }
16011                 act_cnt = &mtr_policy->act_cnt[i];
16012                 if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16013                         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16014                         if (act_cnt->rix_mark)
16015                                 dh.mark = 1;
16016                         dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16017                         dh.rix_hrxq = hrxq_idx[i];
16018                         flow_drv_rxq_flags_set(dev, &dh);
16019                 }
16020         }
16021         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16022                 sub_policy, domain)) {
16023                 DRV_LOG(ERR, "Failed to create policy "
16024                         "rules per domain.");
16025                 goto rss_sub_policy_error;
16026         }
16027         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16028                 i = (mtr_policy->sub_policy_num >>
16029                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16030                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16031                 mtr_policy->sub_policys[domain][i] = sub_policy;
16032                 i++;
16033                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
16034                         goto rss_sub_policy_error;
16035                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16036                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16037                 mtr_policy->sub_policy_num |=
16038                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16039                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16040         }
16041         rte_spinlock_unlock(&mtr_policy->sl);
16042         return sub_policy;
16043 rss_sub_policy_error:
16044         if (sub_policy) {
16045                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16046                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16047                         i = (mtr_policy->sub_policy_num >>
16048                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16049                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16050                         mtr_policy->sub_policys[domain][i] = NULL;
16051                         mlx5_ipool_free
16052                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16053                                         sub_policy->idx);
16054                 }
16055         }
16059         rte_spinlock_unlock(&mtr_policy->sl);
16060         return NULL;
16061 }
16062
16063
16064 /**
16065  * Destroy the sub policy table with RX queue.
16066  *
16067  * @param[in] dev
16068  *   Pointer to Ethernet device.
16069  * @param[in] mtr_policy
16070  *   Pointer to meter policy table.
16071  */
16072 static void
16073 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16074                 struct mlx5_flow_meter_policy *mtr_policy)
16075 {
16076         struct mlx5_priv *priv = dev->data->dev_private;
16077         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16078         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16079         uint32_t i, j;
16080         uint16_t sub_policy_num, new_policy_num;
16081
16082         rte_spinlock_lock(&mtr_policy->sl);
16083         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16084                 switch (mtr_policy->act_cnt[i].fate_action) {
16085                 case MLX5_FLOW_FATE_SHARED_RSS:
16086                         sub_policy_num = (mtr_policy->sub_policy_num >>
16087                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16088                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16089                         new_policy_num = sub_policy_num;
16090                         for (j = 0; j < sub_policy_num; j++) {
16091                                 sub_policy =
16092                                         mtr_policy->sub_policys[domain][j];
16093                                 if (sub_policy) {
16094                                         __flow_dv_destroy_sub_policy_rules(dev,
16095                                                 sub_policy);
16096                                         if (sub_policy !=
16097                                             mtr_policy->sub_policys[domain][0]) {
16098                                                 mtr_policy->sub_policys[domain][j] =
16099                                                                 NULL;
16100                                                 mlx5_ipool_free
16101                                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16102                                                         sub_policy->idx);
16103                                                 new_policy_num--;
16104                                         }
16105                                 }
16106                         }
16107                         if (new_policy_num != sub_policy_num) {
16108                                 mtr_policy->sub_policy_num &=
16109                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16110                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16111                                 mtr_policy->sub_policy_num |=
16112                                 (new_policy_num &
16113                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16114                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16115                         }
16116                         break;
16117                 case MLX5_FLOW_FATE_QUEUE:
16118                         sub_policy = mtr_policy->sub_policys[domain][0];
16119                         __flow_dv_destroy_sub_policy_rules(dev,
16120                                                 sub_policy);
16121                         break;
16122                 default:
16123                         /* Other actions have no queue: nothing to do. */
16124                         break;
16125                 }
16126         }
16127         rte_spinlock_unlock(&mtr_policy->sl);
16128 }
16129
16130 /**
16131  * Validate the batch counter support in root table.
16132  *
16133  * Create a simple flow with invalid counter and drop action on root table to
16134  * validate if batch counter with offset on root table is supported or not.
16135  *
16136  * @param[in] dev
16137  *   Pointer to rte_eth_dev structure.
16138  *
16139  * @return
16140  *   0 on success, a negative errno value otherwise and rte_errno is set.
16141  */
16142 int
16143 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16144 {
16145         struct mlx5_priv *priv = dev->data->dev_private;
16146         struct mlx5_dev_ctx_shared *sh = priv->sh;
16147         struct mlx5_flow_dv_match_params mask = {
16148                 .size = sizeof(mask.buf),
16149         };
16150         struct mlx5_flow_dv_match_params value = {
16151                 .size = sizeof(value.buf),
16152         };
16153         struct mlx5dv_flow_matcher_attr dv_attr = {
16154                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16155                 .priority = 0,
16156                 .match_criteria_enable = 0,
16157                 .match_mask = (void *)&mask,
16158         };
16159         void *actions[2] = { 0 };
16160         struct mlx5_flow_tbl_resource *tbl = NULL;
16161         struct mlx5_devx_obj *dcs = NULL;
16162         void *matcher = NULL;
16163         void *flow = NULL;
16164         int ret = -1;
16165
16166         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16167                                         0, 0, 0, NULL);
16168         if (!tbl)
16169                 goto err;
16170         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16171         if (!dcs)
16172                 goto err;
16173         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16174                                                     &actions[0]);
16175         if (ret)
16176                 goto err;
16177         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
16178         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
16179                                                &matcher);
16180         if (ret)
16181                 goto err;
16182         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
16183                                        actions, &flow);
16184 err:
16185         /*
16186          * If the batch counter with offset is not supported, the driver does
16187          * not validate the invalid offset value and flow creation succeeds.
16188          * In this case, batch counters are not supported in the root table.
16189          *
16190          * Otherwise, if flow creation fails, the counter offset is supported.
16191          */
16192         if (flow) {
16193                 DRV_LOG(INFO, "Batch counter is not supported in root "
16194                               "table. Switch to fallback mode.");
16195                 rte_errno = ENOTSUP;
16196                 ret = -rte_errno;
16197                 claim_zero(mlx5_flow_os_destroy_flow(flow));
16198         } else {
16199                 /* Check matcher to make sure validate fail at flow create. */
16200                 if (!matcher || (matcher && errno != EINVAL))
16201                         DRV_LOG(ERR, "Unexpected error in counter offset "
16202                                      "support detection");
16203                 ret = 0;
16204         }
16205         if (actions[0])
16206                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
16207         if (matcher)
16208                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
16209         if (tbl)
16210                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
16211         if (dcs)
16212                 claim_zero(mlx5_devx_cmd_destroy(dcs));
16213         return ret;
16214 }
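/*
 * Summary of the probe above: if the flow was created, the invalid counter
 * offset was not validated, so batch counters are unsupported on the root
 * table and the driver switches to fallback mode (ENOTSUP). If flow
 * creation was rejected, the offset was validated and refused, so batch
 * counters with offset are supported and 0 is returned.
 */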
16215
16216 /**
16217  * Query a devx counter.
16218  *
16219  * @param[in] dev
16220  *   Pointer to the Ethernet device structure.
16221  * @param[in] cnt
16222  *   Index to the flow counter.
16223  * @param[in] clear
16224  *   Set to clear the counter statistics.
16225  * @param[out] pkts
16226  *   The statistics value of packets.
16227  * @param[out] bytes
16228  *   The statistics value of bytes.
16229  *
16230  * @return
16231  *   0 on success, otherwise return -1.
16232  */
16233 static int
16234 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
16235                       uint64_t *pkts, uint64_t *bytes)
16236 {
16237         struct mlx5_priv *priv = dev->data->dev_private;
16238         struct mlx5_flow_counter *cnt;
16239         uint64_t inn_pkts, inn_bytes;
16240         int ret;
16241
16242         if (!priv->config.devx)
16243                 return -1;
16244
16245         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
16246         if (ret)
16247                 return -1;
16248         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
16249         *pkts = inn_pkts - cnt->hits;
16250         *bytes = inn_bytes - cnt->bytes;
16251         if (clear) {
16252                 cnt->hits = inn_pkts;
16253                 cnt->bytes = inn_bytes;
16254         }
16255         return 0;
16256 }
16257
16258 /**
16259  * Get aged-out flows.
16260  *
16261  * @param[in] dev
16262  *   Pointer to the Ethernet device structure.
16263  * @param[in] context
16264  *   The address of an array of pointers to the aged-out flows contexts.
16265  * @param[in] nb_contexts
16266  *   The length of context array pointers.
16267  * @param[out] error
16268  *   Perform verbose error reporting if not NULL. Initialized in case of
16269  *   error only.
16270  *
16271  * @return
16272  *   The number of contexts reported on success, otherwise a negative
16273  *   errno value. If nb_contexts is 0, the total number of aged contexts
16274  *   is returned; otherwise, the number of aged flows reported in the
16275  *   context array is returned.
16277  */
16278 static int
16279 flow_get_aged_flows(struct rte_eth_dev *dev,
16280                     void **context,
16281                     uint32_t nb_contexts,
16282                     struct rte_flow_error *error)
16283 {
16284         struct mlx5_priv *priv = dev->data->dev_private;
16285         struct mlx5_age_info *age_info;
16286         struct mlx5_age_param *age_param;
16287         struct mlx5_flow_counter *counter;
16288         struct mlx5_aso_age_action *act;
16289         int nb_flows = 0;
16290
16291         if (nb_contexts && !context)
16292                 return rte_flow_error_set(error, EINVAL,
16293                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16294                                           NULL, "empty context");
16295         age_info = GET_PORT_AGE_INFO(priv);
16296         rte_spinlock_lock(&age_info->aged_sl);
16297         LIST_FOREACH(act, &age_info->aged_aso, next) {
16298                 nb_flows++;
16299                 if (nb_contexts) {
16300                         context[nb_flows - 1] =
16301                                                 act->age_params.context;
16302                         if (!(--nb_contexts))
16303                                 break;
16304                 }
16305         }
16306         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
16307                 nb_flows++;
16308                 if (nb_contexts) {
16309                         age_param = MLX5_CNT_TO_AGE(counter);
16310                         context[nb_flows - 1] = age_param->context;
16311                         if (!(--nb_contexts))
16312                                 break;
16313                 }
16314         }
16315         rte_spinlock_unlock(&age_info->aged_sl);
16316         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
16317         return nb_flows;
16318 }
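/*
 * Illustrative two-step usage through the public API (assumed application
 * code): call rte_flow_get_aged_flows() with nb_contexts == 0 to learn how
 * many aged flows are pending, then again with a buffer:
 *
 *   struct rte_flow_error err;
 *   int n = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
 *
 *   if (n > 0) {
 *           void **ctx = calloc(n, sizeof(*ctx));
 *
 *           n = rte_flow_get_aged_flows(port_id, ctx, n, &err);
 *           // each ctx[i] is the AGE context given at rule creation
 *           free(ctx);
 *   }
 */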
16319
16320 /*
16321  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
16322  */
16323 static uint32_t
16324 flow_dv_counter_allocate(struct rte_eth_dev *dev)
16325 {
16326         return flow_dv_counter_alloc(dev, 0);
16327 }

/**
 * Validate indirect action.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Indirect action configuration.
 * @param[in] action
 *   The indirect action object to validate.
 * @param[out] err
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
flow_dv_action_validate(struct rte_eth_dev *dev,
                        const struct rte_flow_indir_action_conf *conf,
                        const struct rte_flow_action *action,
                        struct rte_flow_error *err)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        RTE_SET_USED(conf);
        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_RSS:
                /*
                 * priv->obj_ops is set according to driver capabilities.
                 * When DevX capabilities are sufficient, it is set to
                 * devx_obj_ops; otherwise it is set to ibv_obj_ops.
                 * ibv_obj_ops doesn't support the ind_table_modify
                 * operation, so the indirect RSS action can't be used.
                 */
                if (priv->obj_ops.ind_table_modify == NULL)
                        return rte_flow_error_set
                                        (err, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ACTION,
                                         NULL,
                                         "Indirect RSS action not supported");
                return mlx5_validate_action_rss(dev, action, err);
        case RTE_FLOW_ACTION_TYPE_AGE:
                if (!priv->sh->aso_age_mng)
                        return rte_flow_error_set(err, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                                NULL,
                                                "Indirect age action not supported");
                return flow_dv_validate_action_age(0, action, dev, err);
        case RTE_FLOW_ACTION_TYPE_COUNT:
                /*
                 * There are two mechanisms to share the action count.
                 * The old mechanism uses the shared field to share, while the
                 * new mechanism uses the indirect action API.
                 * This validation comes to make sure that the two mechanisms
                 * are not combined.
                 */
                if (is_shared_action_count(action))
                        return rte_flow_error_set(err, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "Mixing shared and indirect counters is not supported");
                return flow_dv_validate_action_count(dev, true, 0, err);
        case RTE_FLOW_ACTION_TYPE_CONNTRACK:
                if (!priv->sh->ct_aso_en)
                        return rte_flow_error_set(err, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                        "ASO CT is not supported");
                return mlx5_validate_action_ct(dev, action->conf, err);
        default:
                return rte_flow_error_set(err, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL,
                                          "action type not supported");
        }
}
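
/*
 * Illustrative usage sketch: the validation above runs when an application
 * creates an indirect action through the generic rte_flow API. Assuming a
 * valid port_id and a pre-filled rss_conf (a hypothetical
 * struct rte_flow_action_rss, not defined here), creating an indirect RSS
 * action looks like:
 *
 *	struct rte_flow_indir_action_conf conf = {
 *		.ingress = 1,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss_conf,
 *	};
 *	struct rte_flow_error flow_error;
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(port_id, &conf, &action,
 *					      &flow_error);
 *	// A NULL handle with ENOTSUP here typically means
 *	// ind_table_modify is unavailable (Verbs objects in use).
 */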

/**
 * Validate meter policy actions.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   Array of per-color action lists of the meter policy to validate.
 * @param[in] attr
 *   Attributes of the flow to determine the steering domain.
 * @param[out] is_rss
 *   Set to true when the policy uses an RSS action.
 * @param[out] domain_bitmap
 *   Bitmap of the steering domains the policy can be created in.
 * @param[out] is_def_policy
 *   Set to true when the policy matches the default one
 *   (green: no action, red: drop).
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
                        const struct rte_flow_action *actions[RTE_COLORS],
                        struct rte_flow_attr *attr,
                        bool *is_rss,
                        uint8_t *domain_bitmap,
                        bool *is_def_policy,
                        struct rte_mtr_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *dev_conf = &priv->config;
        const struct rte_flow_action *act;
        uint64_t action_flags = 0;
        int actions_n;
        int i, ret;
        struct rte_flow_error flow_err;
        uint8_t domain_color[RTE_COLORS] = {0};
        uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;

        if (!priv->config.dv_esw_en)
                def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
        *domain_bitmap = def_domain;
        if (actions[RTE_COLOR_YELLOW] &&
                actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
                return -rte_mtr_error_set(error, ENOTSUP,
                                RTE_MTR_ERROR_TYPE_METER_POLICY,
                                NULL,
                                "Yellow color does not support any action.");
        if (actions[RTE_COLOR_RED] &&
                actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
                return -rte_mtr_error_set(error, ENOTSUP,
                                RTE_MTR_ERROR_TYPE_METER_POLICY,
                                NULL, "Red color only supports drop action.");
        /*
         * Check default policy actions:
         * Green/Yellow: no action, Red: drop action
         */
        if ((!actions[RTE_COLOR_GREEN] ||
                actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
                *is_def_policy = true;
                return 0;
        }
        flow_err.message = NULL;
        for (i = 0; i < RTE_COLORS; i++) {
                act = actions[i];
                for (action_flags = 0, actions_n = 0;
                        act && act->type != RTE_FLOW_ACTION_TYPE_END;
                        act++) {
                        if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, "too many actions");
                        switch (act->type) {
                        case RTE_FLOW_ACTION_TYPE_PORT_ID:
                                if (!priv->config.dv_esw_en)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "PORT action not allowed "
                                        "when E-Switch is disabled");
                                ret = flow_dv_validate_action_port_id(dev,
                                                action_flags,
                                                act, attr, &flow_err);
                                if (ret)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, flow_err.message ?
                                        flow_err.message :
                                        "PORT action validation failed");
                                ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_PORT_ID;
                                break;
                        case RTE_FLOW_ACTION_TYPE_MARK:
                                ret = flow_dv_validate_action_mark(dev, act,
                                                           action_flags,
                                                           attr, &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, flow_err.message ?
                                        flow_err.message :
                                        "Mark action validation failed");
                                if (dev_conf->dv_xmeta_en !=
                                        MLX5_XMETA_MODE_LEGACY)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "Extended MARK action is "
                                        "not supported. Please try to use "
                                        "the default policy for the meter.");
                                action_flags |= MLX5_FLOW_ACTION_MARK;
                                ++actions_n;
                                break;
                        case RTE_FLOW_ACTION_TYPE_SET_TAG:
                                ret = flow_dv_validate_action_set_tag(dev,
                                                        act, action_flags,
                                                        attr, &flow_err);
                                if (ret)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, flow_err.message ?
                                        flow_err.message :
                                        "Set tag action validation failed");
                                /*
                                 * Count all modify-header actions
                                 * as one action.
                                 */
                                if (!(action_flags &
                                        MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                        ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_SET_TAG;
                                break;
                        case RTE_FLOW_ACTION_TYPE_DROP:
                                ret = mlx5_flow_validate_action_drop
                                        (action_flags,
                                        attr, &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, flow_err.message ?
                                        flow_err.message :
                                        "Drop action validation failed");
                                action_flags |= MLX5_FLOW_ACTION_DROP;
                                ++actions_n;
                                break;
                        case RTE_FLOW_ACTION_TYPE_QUEUE:
                                /*
                                 * Check whether the extensive
                                 * metadata feature is engaged.
                                 */
                                if (dev_conf->dv_flow_en &&
                                        (dev_conf->dv_xmeta_en !=
                                        MLX5_XMETA_MODE_LEGACY) &&
                                        mlx5_flow_ext_mreg_supported(dev))
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, "Queue action with meta "
                                          "is not supported. Please try to "
                                          "use the default policy for the "
                                          "meter.");
                                ret = mlx5_flow_validate_action_queue(act,
                                                        action_flags, dev,
                                                        attr, &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, flow_err.message ?
                                          flow_err.message :
                                          "Queue action validation failed");
                                action_flags |= MLX5_FLOW_ACTION_QUEUE;
                                ++actions_n;
                                break;
                        case RTE_FLOW_ACTION_TYPE_RSS:
                                if (dev_conf->dv_flow_en &&
                                        (dev_conf->dv_xmeta_en !=
                                        MLX5_XMETA_MODE_LEGACY) &&
                                        mlx5_flow_ext_mreg_supported(dev))
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, "RSS action with meta "
                                          "is not supported. Please try to "
                                          "use the default policy for the "
                                          "meter.");
                                ret = mlx5_validate_action_rss(dev, act,
                                                &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, flow_err.message ?
                                          flow_err.message :
                                          "RSS action validation failed");
                                action_flags |= MLX5_FLOW_ACTION_RSS;
                                ++actions_n;
                                *is_rss = true;
                                break;
                        case RTE_FLOW_ACTION_TYPE_JUMP:
                                ret = flow_dv_validate_action_jump(dev,
                                        NULL, act, action_flags,
                                        attr, true, &flow_err);
                                if (ret)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, flow_err.message ?
                                          flow_err.message :
                                          "Jump action validation failed");
                                ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_JUMP;
                                break;
                        default:
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL,
                                        "Action not supported in meter policy");
                        }
                }
                /* Yellow is not supported, just skip. */
                if (i == RTE_COLOR_YELLOW)
                        continue;
                if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                        domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
                else if ((action_flags &
                        (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
                        (action_flags & MLX5_FLOW_ACTION_MARK))
                        /*
                         * Only MLX5_XMETA_MODE_LEGACY is supported here,
                         * so the MARK action is valid in the ingress
                         * domain only.
                         */
                        domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
                else
                        domain_color[i] = def_domain;
                /*
                 * Validate the drop action mutual exclusion
                 * with other actions. Drop action is mutually exclusive
                 * with any other action, except for Count action.
                 */
                if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
                        (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
                        return -rte_mtr_error_set(error, ENOTSUP,
                                RTE_MTR_ERROR_TYPE_METER_POLICY,
                                NULL, "Drop action is mutually exclusive "
                                "with any other action");
                }
                /*
                 * The E-Switch has a few restrictions on the items
                 * and actions it can use.
                 */
                if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
                        if (!mlx5_flow_ext_mreg_supported(dev) &&
                                action_flags & MLX5_FLOW_ACTION_MARK)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action MARK");
                        if (action_flags & MLX5_FLOW_ACTION_QUEUE)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action QUEUE");
                        if (action_flags & MLX5_FLOW_ACTION_RSS)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action RSS");
                        if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "no fate action is found");
                } else {
                        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
                                (domain_color[i] &
                                MLX5_MTR_DOMAIN_INGRESS_BIT)) {
                                if ((domain_color[i] &
                                        MLX5_MTR_DOMAIN_EGRESS_BIT))
                                        domain_color[i] =
                                        MLX5_MTR_DOMAIN_EGRESS_BIT;
                                else
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "no fate action is found");
                        }
                }
                if (domain_color[i] != def_domain)
                        *domain_bitmap = domain_color[i];
        }
        return 0;
}
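
/*
 * Illustrative usage sketch: the validation above is reached through the
 * rte_mtr policy API. Assuming a valid port_id and an unused policy_id
 * (both assumptions, not defined here), a green-queue/red-drop policy is
 * set up like:
 *
 *	struct rte_flow_action_queue green_queue = { .index = 0 };
 *	struct rte_flow_action green_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &green_queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action red_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_mtr_meter_policy_params params = {
 *		.actions = {
 *			[RTE_COLOR_GREEN] = green_acts,
 *			[RTE_COLOR_YELLOW] = NULL, // no yellow actions allowed
 *			[RTE_COLOR_RED] = red_acts,
 *		},
 *	};
 *	struct rte_mtr_error mtr_error;
 *	int rc = rte_mtr_meter_policy_add(port_id, policy_id, &params,
 *					  &mtr_error);
 */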

static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret = 0;

        if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
                                                flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
                if (ret != 0)
                        return ret;
        }
        return 0;
}
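
/*
 * Illustrative usage sketch: flow_dv_sync_domain() backs the PMD-specific
 * rte_pmd_mlx5_sync_flow() API from rte_pmd_mlx5.h. Assuming a valid
 * port_id (an assumption, not defined here), synchronizing the Rx and FDB
 * steering domains looks like:
 *
 *	int rc = rte_pmd_mlx5_sync_flow(port_id,
 *					MLX5_DOMAIN_BIT_NIC_RX |
 *					MLX5_DOMAIN_BIT_FDB);
 */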

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
        .query = flow_dv_query,
        .create_mtr_tbls = flow_dv_create_mtr_tbls,
        .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
        .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
        .create_meter = flow_dv_mtr_alloc,
        .free_meter = flow_dv_aso_mtr_release_to_pool,
        .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
        .create_mtr_acts = flow_dv_create_mtr_policy_acts,
        .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
        .create_policy_rules = flow_dv_create_policy_rules,
        .destroy_policy_rules = flow_dv_destroy_policy_rules,
        .create_def_policy = flow_dv_create_def_policy,
        .destroy_def_policy = flow_dv_destroy_def_policy,
        .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
        .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
        .counter_alloc = flow_dv_counter_allocate,
        .counter_free = flow_dv_counter_free,
        .counter_query = flow_dv_counter_query,
        .get_aged_flows = flow_get_aged_flows,
        .action_validate = flow_dv_action_validate,
        .action_create = flow_dv_action_create,
        .action_destroy = flow_dv_action_destroy,
        .action_update = flow_dv_action_update,
        .action_query = flow_dv_action_query,
        .sync_domain = flow_dv_sync_domain,
};
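
/*
 * Illustrative note: this ops table is registered for the DV (Direct
 * Verbs/Direct Rules) flow engine; the generic layer in mlx5_flow.c
 * selects it per flow type and dispatches through it, conceptually:
 *
 *	const struct mlx5_flow_driver_ops *fops =
 *		flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
 *	ret = fops->validate(dev, attr, items, actions, external,
 *			     hairpin, error);
 *
 * (A sketch of the dispatch pattern, not a verbatim call site.)
 */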

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
