acb9af4e61ab15eae633025260239188569b381c
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24 #include <rte_mtr.h>
25 #include <rte_mtr_driver.h>
26
27 #include <mlx5_glue.h>
28 #include <mlx5_devx_cmds.h>
29 #include <mlx5_prm.h>
30 #include <mlx5_malloc.h>
31
32 #include "mlx5_defs.h"
33 #include "mlx5.h"
34 #include "mlx5_common_os.h"
35 #include "mlx5_flow.h"
36 #include "mlx5_flow_os.h"
37 #include "mlx5_rx.h"
38 #include "mlx5_tx.h"
39 #include "rte_pmd_mlx5.h"
40
41 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
42
43 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
44 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
45 #endif
46
47 #ifndef HAVE_MLX5DV_DR_ESWITCH
48 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
49 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
50 #endif
51 #endif
52
53 #ifndef HAVE_MLX5DV_DR
54 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
55 #endif
56
57 /* VLAN header definitions */
58 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
59 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
60 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
61 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
62 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
63
/* Matched flow item attributes, used to select modify-header field tables. */
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once the attributes are initialized. */
		uint32_t ipv4:1; /* Outermost L3 is IPv4. */
		uint32_t ipv6:1; /* Outermost L3 is IPv6. */
		uint32_t tcp:1; /* Outermost L4 is TCP. */
		uint32_t udp:1; /* Outermost L4 is UDP. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* All flags as one word, for bulk reset to zero. */
};
75
76 static int
77 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
78                              struct mlx5_flow_tbl_resource *tbl);
79
80 static int
81 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
82                                      uint32_t encap_decap_idx);
83
84 static int
85 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
86                                         uint32_t port_id);
87 static void
88 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
89
90 static int
91 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
92                                   uint32_t rix_jump);
93
94 /**
95  * Initialize flow attributes structure according to flow items' types.
96  *
97  * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
98  * mode. For tunnel mode, the items to be modified are the outermost ones.
99  *
100  * @param[in] item
101  *   Pointer to item specification.
102  * @param[out] attr
103  *   Pointer to flow attributes structure.
104  * @param[in] dev_flow
105  *   Pointer to the sub flow.
106  * @param[in] tunnel_decap
107  *   Whether action is after tunnel decapsulation.
108  */
109 static void
110 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
111                   struct mlx5_flow *dev_flow, bool tunnel_decap)
112 {
113         uint64_t layers = dev_flow->handle->layers;
114
115         /*
116          * If layers is already initialized, it means this dev_flow is the
117          * suffix flow, the layers flags is set by the prefix flow. Need to
118          * use the layer flags from prefix flow as the suffix flow may not
119          * have the user defined items as the flow is split.
120          */
121         if (layers) {
122                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
123                         attr->ipv4 = 1;
124                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
125                         attr->ipv6 = 1;
126                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
127                         attr->tcp = 1;
128                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
129                         attr->udp = 1;
130                 attr->valid = 1;
131                 return;
132         }
133         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
134                 uint8_t next_protocol = 0xff;
135                 switch (item->type) {
136                 case RTE_FLOW_ITEM_TYPE_GRE:
137                 case RTE_FLOW_ITEM_TYPE_NVGRE:
138                 case RTE_FLOW_ITEM_TYPE_VXLAN:
139                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
140                 case RTE_FLOW_ITEM_TYPE_GENEVE:
141                 case RTE_FLOW_ITEM_TYPE_MPLS:
142                         if (tunnel_decap)
143                                 attr->attr = 0;
144                         break;
145                 case RTE_FLOW_ITEM_TYPE_IPV4:
146                         if (!attr->ipv6)
147                                 attr->ipv4 = 1;
148                         if (item->mask != NULL &&
149                             ((const struct rte_flow_item_ipv4 *)
150                             item->mask)->hdr.next_proto_id)
151                                 next_protocol =
152                                     ((const struct rte_flow_item_ipv4 *)
153                                       (item->spec))->hdr.next_proto_id &
154                                     ((const struct rte_flow_item_ipv4 *)
155                                       (item->mask))->hdr.next_proto_id;
156                         if ((next_protocol == IPPROTO_IPIP ||
157                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
158                                 attr->attr = 0;
159                         break;
160                 case RTE_FLOW_ITEM_TYPE_IPV6:
161                         if (!attr->ipv4)
162                                 attr->ipv6 = 1;
163                         if (item->mask != NULL &&
164                             ((const struct rte_flow_item_ipv6 *)
165                             item->mask)->hdr.proto)
166                                 next_protocol =
167                                     ((const struct rte_flow_item_ipv6 *)
168                                       (item->spec))->hdr.proto &
169                                     ((const struct rte_flow_item_ipv6 *)
170                                       (item->mask))->hdr.proto;
171                         if ((next_protocol == IPPROTO_IPIP ||
172                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
173                                 attr->attr = 0;
174                         break;
175                 case RTE_FLOW_ITEM_TYPE_UDP:
176                         if (!attr->tcp)
177                                 attr->udp = 1;
178                         break;
179                 case RTE_FLOW_ITEM_TYPE_TCP:
180                         if (!attr->udp)
181                                 attr->tcp = 1;
182                         break;
183                 default:
184                         break;
185                 }
186         }
187         attr->valid = 1;
188 }
189
190 /**
191  * Convert rte_mtr_color to mlx5 color.
192  *
193  * @param[in] rcol
194  *   rte_mtr_color.
195  *
196  * @return
197  *   mlx5 color.
198  */
199 static int
200 rte_col_2_mlx5_col(enum rte_color rcol)
201 {
202         switch (rcol) {
203         case RTE_COLOR_GREEN:
204                 return MLX5_FLOW_COLOR_GREEN;
205         case RTE_COLOR_YELLOW:
206                 return MLX5_FLOW_COLOR_YELLOW;
207         case RTE_COLOR_RED:
208                 return MLX5_FLOW_COLOR_RED;
209         default:
210                 break;
211         }
212         return MLX5_FLOW_COLOR_UNDEFINED;
213 }
214
/* Describes one protocol-header field for modify-header conversion. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* Device modification field id. */
};
220
/* Ethernet header fields: destination then source MAC, byte offsets. */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0}, /* End marker. */
};
228
struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! (unlike the other tables, which use bytes). */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0}, /* End marker. */
};
234
/* IPv4 header fields, byte offsets per RFC 791 layout. */
struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP}, /* Second header byte (ToS/DSCP). */
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0}, /* End marker. */
};
242
/* IPv6 header fields, byte offsets per RFC 8200 layout. */
struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP}, /* First byte: version + upper TC. */
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	/* 128-bit addresses are modified 32 bits at a time. */
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0}, /* End marker. */
};
256
/* UDP header fields, byte offsets. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0}, /* End marker. */
};
262
/* TCP header fields, byte offsets. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0}, /* End marker. */
};
270
271 static const struct rte_flow_item *
272 mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
273 {
274         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
275                 switch (item->type) {
276                 default:
277                         break;
278                 case RTE_FLOW_ITEM_TYPE_VXLAN:
279                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
280                 case RTE_FLOW_ITEM_TYPE_GRE:
281                 case RTE_FLOW_ITEM_TYPE_MPLS:
282                 case RTE_FLOW_ITEM_TYPE_NVGRE:
283                 case RTE_FLOW_ITEM_TYPE_GENEVE:
284                         return item;
285                 case RTE_FLOW_ITEM_TYPE_IPV4:
286                 case RTE_FLOW_ITEM_TYPE_IPV6:
287                         if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
288                             item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
289                                 return item;
290                         break;
291                 }
292         }
293         return NULL;
294 }
295
296 static void
297 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
298                           uint8_t next_protocol, uint64_t *item_flags,
299                           int *tunnel)
300 {
301         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
302                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
303         if (next_protocol == IPPROTO_IPIP) {
304                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
305                 *tunnel = 1;
306         }
307         if (next_protocol == IPPROTO_IPV6) {
308                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
309                 *tunnel = 1;
310         }
311 }
312
313 /* Update VLAN's VID/PCP based on input rte_flow_action.
314  *
315  * @param[in] action
316  *   Pointer to struct rte_flow_action.
317  * @param[out] vlan
318  *   Pointer to struct rte_vlan_hdr.
319  */
320 static void
321 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
322                          struct rte_vlan_hdr *vlan)
323 {
324         uint16_t vlan_tci;
325         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
326                 vlan_tci =
327                     ((const struct rte_flow_action_of_set_vlan_pcp *)
328                                                action->conf)->vlan_pcp;
329                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
330                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
331                 vlan->vlan_tci |= vlan_tci;
332         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
333                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
334                 vlan->vlan_tci |= rte_be_to_cpu_16
335                     (((const struct rte_flow_action_of_set_vlan_vid *)
336                                              action->conf)->vlan_vid);
337         }
338 }
339
/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * The bytes are stored in network (big-endian) order, so the value is
 * assembled byte by byte - no unaligned wide load is required.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   converted field in host endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = data[0];
		break;
	case 2:
		ret = ((uint32_t)data[0] << 8) | data[1];
		break;
	case 3:
		ret = ((uint32_t)data[0] << 16) | ((uint32_t)data[1] << 8) |
		      data[2];
		break;
	case 4:
		ret = ((uint32_t)data[0] << 24) | ((uint32_t)data[1] << 16) |
		      ((uint32_t)data[2] << 8) | data[3];
		break;
	default:
		MLX5_ASSERT(false);
		ret = 0;
		break;
	}
	return ret;
}
378
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented as in big-endian format either.
	 * Mask must be always present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b; /* Deduced field width, in bits. */
		unsigned int off_b; /* Deduced field offset from LSB, bits. */
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		/* A zero mask means this field is not to be modified. */
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		/* A length of 0 encodes the full 32-bit width. */
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			/* Negative dcopy offset mirrors the source offset. */
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
			++dcopy;
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	/* No command was appended - the whole item mask was zero. */
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
481
482 /**
483  * Convert modify-header set IPv4 address action to DV specification.
484  *
485  * @param[in,out] resource
486  *   Pointer to the modify-header resource.
487  * @param[in] action
488  *   Pointer to action specification.
489  * @param[out] error
490  *   Pointer to the error structure.
491  *
492  * @return
493  *   0 on success, a negative errno value otherwise and rte_errno is set.
494  */
495 static int
496 flow_dv_convert_action_modify_ipv4
497                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
498                          const struct rte_flow_action *action,
499                          struct rte_flow_error *error)
500 {
501         const struct rte_flow_action_set_ipv4 *conf =
502                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
503         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
504         struct rte_flow_item_ipv4 ipv4;
505         struct rte_flow_item_ipv4 ipv4_mask;
506
507         memset(&ipv4, 0, sizeof(ipv4));
508         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
509         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
510                 ipv4.hdr.src_addr = conf->ipv4_addr;
511                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
512         } else {
513                 ipv4.hdr.dst_addr = conf->ipv4_addr;
514                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
515         }
516         item.spec = &ipv4;
517         item.mask = &ipv4_mask;
518         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
519                                              MLX5_MODIFICATION_TYPE_SET, error);
520 }
521
522 /**
523  * Convert modify-header set IPv6 address action to DV specification.
524  *
525  * @param[in,out] resource
526  *   Pointer to the modify-header resource.
527  * @param[in] action
528  *   Pointer to action specification.
529  * @param[out] error
530  *   Pointer to the error structure.
531  *
532  * @return
533  *   0 on success, a negative errno value otherwise and rte_errno is set.
534  */
535 static int
536 flow_dv_convert_action_modify_ipv6
537                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
538                          const struct rte_flow_action *action,
539                          struct rte_flow_error *error)
540 {
541         const struct rte_flow_action_set_ipv6 *conf =
542                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
543         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
544         struct rte_flow_item_ipv6 ipv6;
545         struct rte_flow_item_ipv6 ipv6_mask;
546
547         memset(&ipv6, 0, sizeof(ipv6));
548         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
549         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
550                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
551                        sizeof(ipv6.hdr.src_addr));
552                 memcpy(&ipv6_mask.hdr.src_addr,
553                        &rte_flow_item_ipv6_mask.hdr.src_addr,
554                        sizeof(ipv6.hdr.src_addr));
555         } else {
556                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
557                        sizeof(ipv6.hdr.dst_addr));
558                 memcpy(&ipv6_mask.hdr.dst_addr,
559                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
560                        sizeof(ipv6.hdr.dst_addr));
561         }
562         item.spec = &ipv6;
563         item.mask = &ipv6_mask;
564         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
565                                              MLX5_MODIFICATION_TYPE_SET, error);
566 }
567
568 /**
569  * Convert modify-header set MAC address action to DV specification.
570  *
571  * @param[in,out] resource
572  *   Pointer to the modify-header resource.
573  * @param[in] action
574  *   Pointer to action specification.
575  * @param[out] error
576  *   Pointer to the error structure.
577  *
578  * @return
579  *   0 on success, a negative errno value otherwise and rte_errno is set.
580  */
581 static int
582 flow_dv_convert_action_modify_mac
583                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
584                          const struct rte_flow_action *action,
585                          struct rte_flow_error *error)
586 {
587         const struct rte_flow_action_set_mac *conf =
588                 (const struct rte_flow_action_set_mac *)(action->conf);
589         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
590         struct rte_flow_item_eth eth;
591         struct rte_flow_item_eth eth_mask;
592
593         memset(&eth, 0, sizeof(eth));
594         memset(&eth_mask, 0, sizeof(eth_mask));
595         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
596                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
597                        sizeof(eth.src.addr_bytes));
598                 memcpy(&eth_mask.src.addr_bytes,
599                        &rte_flow_item_eth_mask.src.addr_bytes,
600                        sizeof(eth_mask.src.addr_bytes));
601         } else {
602                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
603                        sizeof(eth.dst.addr_bytes));
604                 memcpy(&eth_mask.dst.addr_bytes,
605                        &rte_flow_item_eth_mask.dst.addr_bytes,
606                        sizeof(eth_mask.dst.addr_bytes));
607         }
608         item.spec = &eth;
609         item.mask = &eth_mask;
610         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
611                                              MLX5_MODIFICATION_TYPE_SET, error);
612 }
613
/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		/* modify_vlan_out_first_vid uses bits, not bytes. */
		.length = field->size,
		.offset = field->offset,
	};
	/* Convert the command header to big-endian format. */
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	/*
	 * NOTE(review): unlike the generic conversion path, data1 is not
	 * passed through rte_cpu_to_be_32() here - presumably because
	 * conf->vlan_vid is already big-endian and the shift places it in
	 * the expected data1 bits; verify against the device PRM.
	 */
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}
655
656 /**
657  * Convert modify-header set TP action to DV specification.
658  *
659  * @param[in,out] resource
660  *   Pointer to the modify-header resource.
661  * @param[in] action
662  *   Pointer to action specification.
663  * @param[in] items
664  *   Pointer to rte_flow_item objects list.
665  * @param[in] attr
666  *   Pointer to flow attributes structure.
667  * @param[in] dev_flow
668  *   Pointer to the sub flow.
669  * @param[in] tunnel_decap
670  *   Whether action is after tunnel decapsulation.
671  * @param[out] error
672  *   Pointer to the error structure.
673  *
674  * @return
675  *   0 on success, a negative errno value otherwise and rte_errno is set.
676  */
677 static int
678 flow_dv_convert_action_modify_tp
679                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
680                          const struct rte_flow_action *action,
681                          const struct rte_flow_item *items,
682                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
683                          bool tunnel_decap, struct rte_flow_error *error)
684 {
685         const struct rte_flow_action_set_tp *conf =
686                 (const struct rte_flow_action_set_tp *)(action->conf);
687         struct rte_flow_item item;
688         struct rte_flow_item_udp udp;
689         struct rte_flow_item_udp udp_mask;
690         struct rte_flow_item_tcp tcp;
691         struct rte_flow_item_tcp tcp_mask;
692         struct field_modify_info *field;
693
694         if (!attr->valid)
695                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
696         if (attr->udp) {
697                 memset(&udp, 0, sizeof(udp));
698                 memset(&udp_mask, 0, sizeof(udp_mask));
699                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
700                         udp.hdr.src_port = conf->port;
701                         udp_mask.hdr.src_port =
702                                         rte_flow_item_udp_mask.hdr.src_port;
703                 } else {
704                         udp.hdr.dst_port = conf->port;
705                         udp_mask.hdr.dst_port =
706                                         rte_flow_item_udp_mask.hdr.dst_port;
707                 }
708                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
709                 item.spec = &udp;
710                 item.mask = &udp_mask;
711                 field = modify_udp;
712         } else {
713                 MLX5_ASSERT(attr->tcp);
714                 memset(&tcp, 0, sizeof(tcp));
715                 memset(&tcp_mask, 0, sizeof(tcp_mask));
716                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
717                         tcp.hdr.src_port = conf->port;
718                         tcp_mask.hdr.src_port =
719                                         rte_flow_item_tcp_mask.hdr.src_port;
720                 } else {
721                         tcp.hdr.dst_port = conf->port;
722                         tcp_mask.hdr.dst_port =
723                                         rte_flow_item_tcp_mask.hdr.dst_port;
724                 }
725                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
726                 item.spec = &tcp;
727                 item.mask = &tcp_mask;
728                 field = modify_tcp;
729         }
730         return flow_dv_convert_modify_action(&item, field, NULL, resource,
731                                              MLX5_MODIFICATION_TYPE_SET, error);
732 }
733
734 /**
735  * Convert modify-header set TTL action to DV specification.
736  *
737  * @param[in,out] resource
738  *   Pointer to the modify-header resource.
739  * @param[in] action
740  *   Pointer to action specification.
741  * @param[in] items
742  *   Pointer to rte_flow_item objects list.
743  * @param[in] attr
744  *   Pointer to flow attributes structure.
745  * @param[in] dev_flow
746  *   Pointer to the sub flow.
747  * @param[in] tunnel_decap
748  *   Whether action is after tunnel decapsulation.
749  * @param[out] error
750  *   Pointer to the error structure.
751  *
752  * @return
753  *   0 on success, a negative errno value otherwise and rte_errno is set.
754  */
755 static int
756 flow_dv_convert_action_modify_ttl
757                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
758                          const struct rte_flow_action *action,
759                          const struct rte_flow_item *items,
760                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
761                          bool tunnel_decap, struct rte_flow_error *error)
762 {
763         const struct rte_flow_action_set_ttl *conf =
764                 (const struct rte_flow_action_set_ttl *)(action->conf);
765         struct rte_flow_item item;
766         struct rte_flow_item_ipv4 ipv4;
767         struct rte_flow_item_ipv4 ipv4_mask;
768         struct rte_flow_item_ipv6 ipv6;
769         struct rte_flow_item_ipv6 ipv6_mask;
770         struct field_modify_info *field;
771
772         if (!attr->valid)
773                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
774         if (attr->ipv4) {
775                 memset(&ipv4, 0, sizeof(ipv4));
776                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
777                 ipv4.hdr.time_to_live = conf->ttl_value;
778                 ipv4_mask.hdr.time_to_live = 0xFF;
779                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
780                 item.spec = &ipv4;
781                 item.mask = &ipv4_mask;
782                 field = modify_ipv4;
783         } else {
784                 MLX5_ASSERT(attr->ipv6);
785                 memset(&ipv6, 0, sizeof(ipv6));
786                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
787                 ipv6.hdr.hop_limits = conf->ttl_value;
788                 ipv6_mask.hdr.hop_limits = 0xFF;
789                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
790                 item.spec = &ipv6;
791                 item.mask = &ipv6_mask;
792                 field = modify_ipv6;
793         }
794         return flow_dv_convert_modify_action(&item, field, NULL, resource,
795                                              MLX5_MODIFICATION_TYPE_SET, error);
796 }
797
/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	/* Determine the outer/inner L3 type from the pattern on first use. */
	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		/*
		 * The command below uses MLX5_MODIFICATION_TYPE_ADD;
		 * adding 0xFF (-1 modulo 2^8) to the 8-bit TTL field
		 * decrements it by one.
		 */
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		/* Same -1 trick for the IPv6 hop-limit byte. */
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
858
859 /**
860  * Convert modify-header increment/decrement TCP Sequence number
861  * to DV specification.
862  *
863  * @param[in,out] resource
864  *   Pointer to the modify-header resource.
865  * @param[in] action
866  *   Pointer to action specification.
867  * @param[out] error
868  *   Pointer to the error structure.
869  *
870  * @return
871  *   0 on success, a negative errno value otherwise and rte_errno is set.
872  */
873 static int
874 flow_dv_convert_action_modify_tcp_seq
875                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
876                          const struct rte_flow_action *action,
877                          struct rte_flow_error *error)
878 {
879         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
880         uint64_t value = rte_be_to_cpu_32(*conf);
881         struct rte_flow_item item;
882         struct rte_flow_item_tcp tcp;
883         struct rte_flow_item_tcp tcp_mask;
884
885         memset(&tcp, 0, sizeof(tcp));
886         memset(&tcp_mask, 0, sizeof(tcp_mask));
887         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
888                 /*
889                  * The HW has no decrement operation, only increment operation.
890                  * To simulate decrement X from Y using increment operation
891                  * we need to add UINT32_MAX X times to Y.
892                  * Each adding of UINT32_MAX decrements Y by 1.
893                  */
894                 value *= UINT32_MAX;
895         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
896         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
897         item.type = RTE_FLOW_ITEM_TYPE_TCP;
898         item.spec = &tcp;
899         item.mask = &tcp_mask;
900         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
901                                              MLX5_MODIFICATION_TYPE_ADD, error);
902 }
903
904 /**
905  * Convert modify-header increment/decrement TCP Acknowledgment number
906  * to DV specification.
907  *
908  * @param[in,out] resource
909  *   Pointer to the modify-header resource.
910  * @param[in] action
911  *   Pointer to action specification.
912  * @param[out] error
913  *   Pointer to the error structure.
914  *
915  * @return
916  *   0 on success, a negative errno value otherwise and rte_errno is set.
917  */
918 static int
919 flow_dv_convert_action_modify_tcp_ack
920                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
921                          const struct rte_flow_action *action,
922                          struct rte_flow_error *error)
923 {
924         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
925         uint64_t value = rte_be_to_cpu_32(*conf);
926         struct rte_flow_item item;
927         struct rte_flow_item_tcp tcp;
928         struct rte_flow_item_tcp tcp_mask;
929
930         memset(&tcp, 0, sizeof(tcp));
931         memset(&tcp_mask, 0, sizeof(tcp_mask));
932         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
933                 /*
934                  * The HW has no decrement operation, only increment operation.
935                  * To simulate decrement X from Y using increment operation
936                  * we need to add UINT32_MAX X times to Y.
937                  * Each adding of UINT32_MAX decrements Y by 1.
938                  */
939                 value *= UINT32_MAX;
940         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
941         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
942         item.type = RTE_FLOW_ITEM_TYPE_TCP;
943         item.spec = &tcp;
944         item.mask = &tcp_mask;
945         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
946                                              MLX5_MODIFICATION_TYPE_ADD, error);
947 }
948
/*
 * Map metadata register identifiers (enum modify_reg) to the corresponding
 * modify-header field identifiers used in modification commands.
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
962
963 /**
964  * Convert register set to DV specification.
965  *
966  * @param[in,out] resource
967  *   Pointer to the modify-header resource.
968  * @param[in] action
969  *   Pointer to action specification.
970  * @param[out] error
971  *   Pointer to the error structure.
972  *
973  * @return
974  *   0 on success, a negative errno value otherwise and rte_errno is set.
975  */
976 static int
977 flow_dv_convert_action_set_reg
978                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
979                          const struct rte_flow_action *action,
980                          struct rte_flow_error *error)
981 {
982         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
983         struct mlx5_modification_cmd *actions = resource->actions;
984         uint32_t i = resource->actions_num;
985
986         if (i >= MLX5_MAX_MODIFY_NUM)
987                 return rte_flow_error_set(error, EINVAL,
988                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
989                                           "too many items to modify");
990         MLX5_ASSERT(conf->id != REG_NON);
991         MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
992         actions[i] = (struct mlx5_modification_cmd) {
993                 .action_type = MLX5_MODIFICATION_TYPE_SET,
994                 .field = reg_to_field[conf->id],
995                 .offset = conf->offset,
996                 .length = conf->length,
997         };
998         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
999         actions[i].data1 = rte_cpu_to_be_32(conf->data);
1000         ++i;
1001         resource->actions_num = i;
1002         return 0;
1003 }
1004
1005 /**
1006  * Convert SET_TAG action to DV specification.
1007  *
1008  * @param[in] dev
1009  *   Pointer to the rte_eth_dev structure.
1010  * @param[in,out] resource
1011  *   Pointer to the modify-header resource.
1012  * @param[in] conf
1013  *   Pointer to action specification.
1014  * @param[out] error
1015  *   Pointer to the error structure.
1016  *
1017  * @return
1018  *   0 on success, a negative errno value otherwise and rte_errno is set.
1019  */
1020 static int
1021 flow_dv_convert_action_set_tag
1022                         (struct rte_eth_dev *dev,
1023                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1024                          const struct rte_flow_action_set_tag *conf,
1025                          struct rte_flow_error *error)
1026 {
1027         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1028         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1029         struct rte_flow_item item = {
1030                 .spec = &data,
1031                 .mask = &mask,
1032         };
1033         struct field_modify_info reg_c_x[] = {
1034                 [1] = {0, 0, 0},
1035         };
1036         enum mlx5_modification_field reg_type;
1037         int ret;
1038
1039         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1040         if (ret < 0)
1041                 return ret;
1042         MLX5_ASSERT(ret != REG_NON);
1043         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1044         reg_type = reg_to_field[ret];
1045         MLX5_ASSERT(reg_type > 0);
1046         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1047         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1048                                              MLX5_MODIFICATION_TYPE_SET, error);
1049 }
1050
/**
 * Convert internal COPY_REG action to DV specification.
 *
 * Builds a modify-header COPY command moving the contents of one metadata
 * register into another. When reg_c[0] is involved, only the bits reported
 * as available in dv_regc0_mask are copied and the destination offset is
 * adjusted so that the usable sub-field lines up on both sides.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	/* No spec: a COPY command only needs the mask to select bits. */
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask is ignoring the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1126
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Only the bits reported in dv_mark_mask can carry the mark. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		/*
		 * Only a sub-field of reg_c[0] is available for the mark:
		 * shift data/mask into that sub-field. The swaps convert to
		 * CPU order for the shift and back, since the datapath does
		 * no endianness conversion on this register.
		 */
		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1183
1184 /**
1185  * Get metadata register index for specified steering domain.
1186  *
1187  * @param[in] dev
1188  *   Pointer to the rte_eth_dev structure.
1189  * @param[in] attr
1190  *   Attributes of flow to determine steering domain.
1191  * @param[out] error
1192  *   Pointer to the error structure.
1193  *
1194  * @return
1195  *   positive index on success, a negative errno value otherwise
1196  *   and rte_errno is set.
1197  */
1198 static enum modify_reg
1199 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1200                          const struct rte_flow_attr *attr,
1201                          struct rte_flow_error *error)
1202 {
1203         int reg =
1204                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1205                                           MLX5_METADATA_FDB :
1206                                             attr->egress ?
1207                                             MLX5_METADATA_TX :
1208                                             MLX5_METADATA_RX, 0, error);
1209         if (reg < 0)
1210                 return rte_flow_error_set(error,
1211                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1212                                           NULL, "unavailable "
1213                                           "metadata register");
1214         return reg;
1215 }
1216
/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	/* Pick the metadata register for this steering domain. */
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	/*
	 * In datapath code there is no endianness
	 * conversions for performance reasons, all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
		/* Shift amount differs with host byte order, see above. */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1281
1282 /**
1283  * Convert modify-header set IPv4 DSCP action to DV specification.
1284  *
1285  * @param[in,out] resource
1286  *   Pointer to the modify-header resource.
1287  * @param[in] action
1288  *   Pointer to action specification.
1289  * @param[out] error
1290  *   Pointer to the error structure.
1291  *
1292  * @return
1293  *   0 on success, a negative errno value otherwise and rte_errno is set.
1294  */
1295 static int
1296 flow_dv_convert_action_modify_ipv4_dscp
1297                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1298                          const struct rte_flow_action *action,
1299                          struct rte_flow_error *error)
1300 {
1301         const struct rte_flow_action_set_dscp *conf =
1302                 (const struct rte_flow_action_set_dscp *)(action->conf);
1303         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1304         struct rte_flow_item_ipv4 ipv4;
1305         struct rte_flow_item_ipv4 ipv4_mask;
1306
1307         memset(&ipv4, 0, sizeof(ipv4));
1308         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1309         ipv4.hdr.type_of_service = conf->dscp;
1310         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1311         item.spec = &ipv4;
1312         item.mask = &ipv4_mask;
1313         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1314                                              MLX5_MODIFICATION_TYPE_SET, error);
1315 }
1316
1317 /**
1318  * Convert modify-header set IPv6 DSCP action to DV specification.
1319  *
1320  * @param[in,out] resource
1321  *   Pointer to the modify-header resource.
1322  * @param[in] action
1323  *   Pointer to action specification.
1324  * @param[out] error
1325  *   Pointer to the error structure.
1326  *
1327  * @return
1328  *   0 on success, a negative errno value otherwise and rte_errno is set.
1329  */
1330 static int
1331 flow_dv_convert_action_modify_ipv6_dscp
1332                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1333                          const struct rte_flow_action *action,
1334                          struct rte_flow_error *error)
1335 {
1336         const struct rte_flow_action_set_dscp *conf =
1337                 (const struct rte_flow_action_set_dscp *)(action->conf);
1338         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1339         struct rte_flow_item_ipv6 ipv6;
1340         struct rte_flow_item_ipv6 ipv6_mask;
1341
1342         memset(&ipv6, 0, sizeof(ipv6));
1343         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1344         /*
1345          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1346          * rdma-core only accept the DSCP bits byte aligned start from
1347          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1348          * bits in IPv6 case as rdma-core requires byte aligned value.
1349          */
1350         ipv6.hdr.vtc_flow = conf->dscp;
1351         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1352         item.spec = &ipv6;
1353         item.mask = &ipv6_mask;
1354         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1355                                              MLX5_MODIFICATION_TYPE_SET, error);
1356 }
1357
1358 static int
1359 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1360                            enum rte_flow_field_id field)
1361 {
1362         switch (field) {
1363         case RTE_FLOW_FIELD_START:
1364                 return 32;
1365         case RTE_FLOW_FIELD_MAC_DST:
1366         case RTE_FLOW_FIELD_MAC_SRC:
1367                 return 48;
1368         case RTE_FLOW_FIELD_VLAN_TYPE:
1369                 return 16;
1370         case RTE_FLOW_FIELD_VLAN_ID:
1371                 return 12;
1372         case RTE_FLOW_FIELD_MAC_TYPE:
1373                 return 16;
1374         case RTE_FLOW_FIELD_IPV4_DSCP:
1375                 return 6;
1376         case RTE_FLOW_FIELD_IPV4_TTL:
1377                 return 8;
1378         case RTE_FLOW_FIELD_IPV4_SRC:
1379         case RTE_FLOW_FIELD_IPV4_DST:
1380                 return 32;
1381         case RTE_FLOW_FIELD_IPV6_DSCP:
1382                 return 6;
1383         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1384                 return 8;
1385         case RTE_FLOW_FIELD_IPV6_SRC:
1386         case RTE_FLOW_FIELD_IPV6_DST:
1387                 return 128;
1388         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1389         case RTE_FLOW_FIELD_TCP_PORT_DST:
1390                 return 16;
1391         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1392         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1393                 return 32;
1394         case RTE_FLOW_FIELD_TCP_FLAGS:
1395                 return 9;
1396         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1397         case RTE_FLOW_FIELD_UDP_PORT_DST:
1398                 return 16;
1399         case RTE_FLOW_FIELD_VXLAN_VNI:
1400         case RTE_FLOW_FIELD_GENEVE_VNI:
1401                 return 24;
1402         case RTE_FLOW_FIELD_GTP_TEID:
1403         case RTE_FLOW_FIELD_TAG:
1404                 return 32;
1405         case RTE_FLOW_FIELD_MARK:
1406                 return 24;
1407         case RTE_FLOW_FIELD_META:
1408                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1409                         return 16;
1410                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1411                         return 32;
1412                 else
1413                         return 0;
1414         case RTE_FLOW_FIELD_POINTER:
1415         case RTE_FLOW_FIELD_VALUE:
1416                 return 64;
1417         default:
1418                 MLX5_ASSERT(false);
1419         }
1420         return 0;
1421 }
1422
1423 static void
1424 mlx5_flow_field_id_to_modify_info
1425                 (const struct rte_flow_action_modify_data *data,
1426                  struct field_modify_info *info,
1427                  uint32_t *mask, uint32_t *value,
1428                  uint32_t width, uint32_t dst_width,
1429                  struct rte_eth_dev *dev,
1430                  const struct rte_flow_attr *attr,
1431                  struct rte_flow_error *error)
1432 {
1433         struct mlx5_priv *priv = dev->data->dev_private;
1434         struct mlx5_dev_config *config = &priv->config;
1435         uint32_t idx = 0;
1436         uint64_t val = 0;
1437         switch (data->field) {
1438         case RTE_FLOW_FIELD_START:
1439                 /* not supported yet */
1440                 MLX5_ASSERT(false);
1441                 break;
1442         case RTE_FLOW_FIELD_MAC_DST:
1443                 if (mask) {
1444                         if (data->offset < 32) {
1445                                 info[idx] = (struct field_modify_info){4, 0,
1446                                                 MLX5_MODI_OUT_DMAC_47_16};
1447                                 if (width < 32) {
1448                                         mask[idx] =
1449                                                 rte_cpu_to_be_32(0xffffffff >>
1450                                                                  (32 - width));
1451                                         width = 0;
1452                                 } else {
1453                                         mask[idx] = RTE_BE32(0xffffffff);
1454                                         width -= 32;
1455                                 }
1456                                 if (!width)
1457                                         break;
1458                                 ++idx;
1459                         }
1460                         info[idx] = (struct field_modify_info){2, 4 * idx,
1461                                                 MLX5_MODI_OUT_DMAC_15_0};
1462                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1463                 } else {
1464                         if (data->offset < 32)
1465                                 info[idx++] = (struct field_modify_info){4, 0,
1466                                                 MLX5_MODI_OUT_DMAC_47_16};
1467                         info[idx] = (struct field_modify_info){2, 0,
1468                                                 MLX5_MODI_OUT_DMAC_15_0};
1469                 }
1470                 break;
1471         case RTE_FLOW_FIELD_MAC_SRC:
1472                 if (mask) {
1473                         if (data->offset < 32) {
1474                                 info[idx] = (struct field_modify_info){4, 0,
1475                                                 MLX5_MODI_OUT_SMAC_47_16};
1476                                 if (width < 32) {
1477                                         mask[idx] =
1478                                                 rte_cpu_to_be_32(0xffffffff >>
1479                                                                 (32 - width));
1480                                         width = 0;
1481                                 } else {
1482                                         mask[idx] = RTE_BE32(0xffffffff);
1483                                         width -= 32;
1484                                 }
1485                                 if (!width)
1486                                         break;
1487                                 ++idx;
1488                         }
1489                         info[idx] = (struct field_modify_info){2, 4 * idx,
1490                                                 MLX5_MODI_OUT_SMAC_15_0};
1491                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1492                 } else {
1493                         if (data->offset < 32)
1494                                 info[idx++] = (struct field_modify_info){4, 0,
1495                                                 MLX5_MODI_OUT_SMAC_47_16};
1496                         info[idx] = (struct field_modify_info){2, 0,
1497                                                 MLX5_MODI_OUT_SMAC_15_0};
1498                 }
1499                 break;
1500         case RTE_FLOW_FIELD_VLAN_TYPE:
1501                 /* not supported yet */
1502                 break;
1503         case RTE_FLOW_FIELD_VLAN_ID:
1504                 info[idx] = (struct field_modify_info){2, 0,
1505                                         MLX5_MODI_OUT_FIRST_VID};
1506                 if (mask)
1507                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1508                 break;
1509         case RTE_FLOW_FIELD_MAC_TYPE:
1510                 info[idx] = (struct field_modify_info){2, 0,
1511                                         MLX5_MODI_OUT_ETHERTYPE};
1512                 if (mask)
1513                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1514                 break;
1515         case RTE_FLOW_FIELD_IPV4_DSCP:
1516                 info[idx] = (struct field_modify_info){1, 0,
1517                                         MLX5_MODI_OUT_IP_DSCP};
1518                 if (mask)
1519                         mask[idx] = 0x3f >> (6 - width);
1520                 break;
1521         case RTE_FLOW_FIELD_IPV4_TTL:
1522                 info[idx] = (struct field_modify_info){1, 0,
1523                                         MLX5_MODI_OUT_IPV4_TTL};
1524                 if (mask)
1525                         mask[idx] = 0xff >> (8 - width);
1526                 break;
1527         case RTE_FLOW_FIELD_IPV4_SRC:
1528                 info[idx] = (struct field_modify_info){4, 0,
1529                                         MLX5_MODI_OUT_SIPV4};
1530                 if (mask)
1531                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1532                                                      (32 - width));
1533                 break;
1534         case RTE_FLOW_FIELD_IPV4_DST:
1535                 info[idx] = (struct field_modify_info){4, 0,
1536                                         MLX5_MODI_OUT_DIPV4};
1537                 if (mask)
1538                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1539                                                      (32 - width));
1540                 break;
1541         case RTE_FLOW_FIELD_IPV6_DSCP:
1542                 info[idx] = (struct field_modify_info){1, 0,
1543                                         MLX5_MODI_OUT_IP_DSCP};
1544                 if (mask)
1545                         mask[idx] = 0x3f >> (6 - width);
1546                 break;
1547         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1548                 info[idx] = (struct field_modify_info){1, 0,
1549                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1550                 if (mask)
1551                         mask[idx] = 0xff >> (8 - width);
1552                 break;
1553         case RTE_FLOW_FIELD_IPV6_SRC:
1554                 if (mask) {
1555                         if (data->offset < 32) {
1556                                 info[idx] = (struct field_modify_info){4,
1557                                                 4 * idx,
1558                                                 MLX5_MODI_OUT_SIPV6_31_0};
1559                                 if (width < 32) {
1560                                         mask[idx] =
1561                                                 rte_cpu_to_be_32(0xffffffff >>
1562                                                                  (32 - width));
1563                                         width = 0;
1564                                 } else {
1565                                         mask[idx] = RTE_BE32(0xffffffff);
1566                                         width -= 32;
1567                                 }
1568                                 if (!width)
1569                                         break;
1570                                 ++idx;
1571                         }
1572                         if (data->offset < 64) {
1573                                 info[idx] = (struct field_modify_info){4,
1574                                                 4 * idx,
1575                                                 MLX5_MODI_OUT_SIPV6_63_32};
1576                                 if (width < 32) {
1577                                         mask[idx] =
1578                                                 rte_cpu_to_be_32(0xffffffff >>
1579                                                                  (32 - width));
1580                                         width = 0;
1581                                 } else {
1582                                         mask[idx] = RTE_BE32(0xffffffff);
1583                                         width -= 32;
1584                                 }
1585                                 if (!width)
1586                                         break;
1587                                 ++idx;
1588                         }
1589                         if (data->offset < 96) {
1590                                 info[idx] = (struct field_modify_info){4,
1591                                                 4 * idx,
1592                                                 MLX5_MODI_OUT_SIPV6_95_64};
1593                                 if (width < 32) {
1594                                         mask[idx] =
1595                                                 rte_cpu_to_be_32(0xffffffff >>
1596                                                                  (32 - width));
1597                                         width = 0;
1598                                 } else {
1599                                         mask[idx] = RTE_BE32(0xffffffff);
1600                                         width -= 32;
1601                                 }
1602                                 if (!width)
1603                                         break;
1604                                 ++idx;
1605                         }
1606                         info[idx] = (struct field_modify_info){4, 4 * idx,
1607                                                 MLX5_MODI_OUT_SIPV6_127_96};
1608                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1609                                                      (32 - width));
1610                 } else {
1611                         if (data->offset < 32)
1612                                 info[idx++] = (struct field_modify_info){4, 0,
1613                                                 MLX5_MODI_OUT_SIPV6_31_0};
1614                         if (data->offset < 64)
1615                                 info[idx++] = (struct field_modify_info){4, 0,
1616                                                 MLX5_MODI_OUT_SIPV6_63_32};
1617                         if (data->offset < 96)
1618                                 info[idx++] = (struct field_modify_info){4, 0,
1619                                                 MLX5_MODI_OUT_SIPV6_95_64};
1620                         if (data->offset < 128)
1621                                 info[idx++] = (struct field_modify_info){4, 0,
1622                                                 MLX5_MODI_OUT_SIPV6_127_96};
1623                 }
1624                 break;
1625         case RTE_FLOW_FIELD_IPV6_DST:
1626                 if (mask) {
1627                         if (data->offset < 32) {
1628                                 info[idx] = (struct field_modify_info){4,
1629                                                 4 * idx,
1630                                                 MLX5_MODI_OUT_DIPV6_31_0};
1631                                 if (width < 32) {
1632                                         mask[idx] =
1633                                                 rte_cpu_to_be_32(0xffffffff >>
1634                                                                  (32 - width));
1635                                         width = 0;
1636                                 } else {
1637                                         mask[idx] = RTE_BE32(0xffffffff);
1638                                         width -= 32;
1639                                 }
1640                                 if (!width)
1641                                         break;
1642                                 ++idx;
1643                         }
1644                         if (data->offset < 64) {
1645                                 info[idx] = (struct field_modify_info){4,
1646                                                 4 * idx,
1647                                                 MLX5_MODI_OUT_DIPV6_63_32};
1648                                 if (width < 32) {
1649                                         mask[idx] =
1650                                                 rte_cpu_to_be_32(0xffffffff >>
1651                                                                  (32 - width));
1652                                         width = 0;
1653                                 } else {
1654                                         mask[idx] = RTE_BE32(0xffffffff);
1655                                         width -= 32;
1656                                 }
1657                                 if (!width)
1658                                         break;
1659                                 ++idx;
1660                         }
1661                         if (data->offset < 96) {
1662                                 info[idx] = (struct field_modify_info){4,
1663                                                 4 * idx,
1664                                                 MLX5_MODI_OUT_DIPV6_95_64};
1665                                 if (width < 32) {
1666                                         mask[idx] =
1667                                                 rte_cpu_to_be_32(0xffffffff >>
1668                                                                  (32 - width));
1669                                         width = 0;
1670                                 } else {
1671                                         mask[idx] = RTE_BE32(0xffffffff);
1672                                         width -= 32;
1673                                 }
1674                                 if (!width)
1675                                         break;
1676                                 ++idx;
1677                         }
1678                         info[idx] = (struct field_modify_info){4, 4 * idx,
1679                                                 MLX5_MODI_OUT_DIPV6_127_96};
1680                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1681                                                      (32 - width));
1682                 } else {
1683                         if (data->offset < 32)
1684                                 info[idx++] = (struct field_modify_info){4, 0,
1685                                                 MLX5_MODI_OUT_DIPV6_31_0};
1686                         if (data->offset < 64)
1687                                 info[idx++] = (struct field_modify_info){4, 0,
1688                                                 MLX5_MODI_OUT_DIPV6_63_32};
1689                         if (data->offset < 96)
1690                                 info[idx++] = (struct field_modify_info){4, 0,
1691                                                 MLX5_MODI_OUT_DIPV6_95_64};
1692                         if (data->offset < 128)
1693                                 info[idx++] = (struct field_modify_info){4, 0,
1694                                                 MLX5_MODI_OUT_DIPV6_127_96};
1695                 }
1696                 break;
1697         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1698                 info[idx] = (struct field_modify_info){2, 0,
1699                                         MLX5_MODI_OUT_TCP_SPORT};
1700                 if (mask)
1701                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1702                 break;
1703         case RTE_FLOW_FIELD_TCP_PORT_DST:
1704                 info[idx] = (struct field_modify_info){2, 0,
1705                                         MLX5_MODI_OUT_TCP_DPORT};
1706                 if (mask)
1707                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1708                 break;
1709         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1710                 info[idx] = (struct field_modify_info){4, 0,
1711                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1712                 if (mask)
1713                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1714                                                      (32 - width));
1715                 break;
1716         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1717                 info[idx] = (struct field_modify_info){4, 0,
1718                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1719                 if (mask)
1720                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1721                                                      (32 - width));
1722                 break;
1723         case RTE_FLOW_FIELD_TCP_FLAGS:
1724                 info[idx] = (struct field_modify_info){2, 0,
1725                                         MLX5_MODI_OUT_TCP_FLAGS};
1726                 if (mask)
1727                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1728                 break;
1729         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1730                 info[idx] = (struct field_modify_info){2, 0,
1731                                         MLX5_MODI_OUT_UDP_SPORT};
1732                 if (mask)
1733                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1734                 break;
1735         case RTE_FLOW_FIELD_UDP_PORT_DST:
1736                 info[idx] = (struct field_modify_info){2, 0,
1737                                         MLX5_MODI_OUT_UDP_DPORT};
1738                 if (mask)
1739                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1740                 break;
1741         case RTE_FLOW_FIELD_VXLAN_VNI:
1742                 /* not supported yet */
1743                 break;
1744         case RTE_FLOW_FIELD_GENEVE_VNI:
                /* not supported yet */
1746                 break;
1747         case RTE_FLOW_FIELD_GTP_TEID:
1748                 info[idx] = (struct field_modify_info){4, 0,
1749                                         MLX5_MODI_GTP_TEID};
1750                 if (mask)
1751                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1752                                                      (32 - width));
1753                 break;
1754         case RTE_FLOW_FIELD_TAG:
1755                 {
1756                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1757                                                    data->level, error);
1758                         if (reg < 0)
1759                                 return;
1760                         MLX5_ASSERT(reg != REG_NON);
1761                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1762                         info[idx] = (struct field_modify_info){4, 0,
1763                                                 reg_to_field[reg]};
1764                         if (mask)
1765                                 mask[idx] =
1766                                         rte_cpu_to_be_32(0xffffffff >>
1767                                                          (32 - width));
1768                 }
1769                 break;
1770         case RTE_FLOW_FIELD_MARK:
1771                 {
1772                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1773                                                        0, error);
1774                         if (reg < 0)
1775                                 return;
1776                         MLX5_ASSERT(reg != REG_NON);
1777                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1778                         info[idx] = (struct field_modify_info){4, 0,
1779                                                 reg_to_field[reg]};
1780                         if (mask)
1781                                 mask[idx] =
1782                                         rte_cpu_to_be_32(0xffffffff >>
1783                                                          (32 - width));
1784                 }
1785                 break;
1786         case RTE_FLOW_FIELD_META:
1787                 {
1788                         unsigned int xmeta = config->dv_xmeta_en;
1789                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1790                         if (reg < 0)
1791                                 return;
1792                         MLX5_ASSERT(reg != REG_NON);
1793                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1794                         if (xmeta == MLX5_XMETA_MODE_META16) {
1795                                 info[idx] = (struct field_modify_info){2, 0,
1796                                                         reg_to_field[reg]};
1797                                 if (mask)
1798                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1799                                                                 (16 - width));
1800                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1801                                 info[idx] = (struct field_modify_info){4, 0,
1802                                                         reg_to_field[reg]};
1803                                 if (mask)
1804                                         mask[idx] =
1805                                                 rte_cpu_to_be_32(0xffffffff >>
1806                                                                 (32 - width));
1807                         } else {
1808                                 MLX5_ASSERT(false);
1809                         }
1810                 }
1811                 break;
1812         case RTE_FLOW_FIELD_POINTER:
1813         case RTE_FLOW_FIELD_VALUE:
1814                 if (data->field == RTE_FLOW_FIELD_POINTER)
1815                         memcpy(&val, (void *)(uintptr_t)data->value,
1816                                sizeof(uint64_t));
1817                 else
1818                         val = data->value;
1819                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1820                         if (mask[idx]) {
1821                                 if (dst_width > 16) {
1822                                         value[idx] = rte_cpu_to_be_32(val);
1823                                         val >>= 32;
1824                                 } else if (dst_width > 8) {
1825                                         value[idx] = rte_cpu_to_be_16(val);
1826                                         val >>= 16;
1827                                 } else {
1828                                         value[idx] = (uint8_t)val;
1829                                         val >>= 8;
1830                                 }
1831                                 if (!val)
1832                                         break;
1833                         }
1834                 }
1835                 break;
1836         default:
1837                 MLX5_ASSERT(false);
1838                 break;
1839         }
1840 }
1841
1842 /**
1843  * Convert modify_field action to DV specification.
1844  *
1845  * @param[in] dev
1846  *   Pointer to the rte_eth_dev structure.
1847  * @param[in,out] resource
1848  *   Pointer to the modify-header resource.
1849  * @param[in] action
1850  *   Pointer to action specification.
1851  * @param[in] attr
1852  *   Attributes of flow that includes this item.
1853  * @param[out] error
1854  *   Pointer to the error structure.
1855  *
1856  * @return
1857  *   0 on success, a negative errno value otherwise and rte_errno is set.
1858  */
1859 static int
1860 flow_dv_convert_action_modify_field
1861                         (struct rte_eth_dev *dev,
1862                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1863                          const struct rte_flow_action *action,
1864                          const struct rte_flow_attr *attr,
1865                          struct rte_flow_error *error)
1866 {
1867         struct mlx5_priv *priv = dev->data->dev_private;
1868         struct mlx5_dev_config *config = &priv->config;
1869         const struct rte_flow_action_modify_field *conf =
1870                 (const struct rte_flow_action_modify_field *)(action->conf);
1871         struct rte_flow_item item;
1872         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1873                                                                 {0, 0, 0} };
1874         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1875                                                                 {0, 0, 0} };
1876         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1877         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1878         uint32_t type;
1879         uint32_t dst_width = mlx5_flow_item_field_width(config,
1880                                                         conf->dst.field);
1881
1882         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1883                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1884                 type = MLX5_MODIFICATION_TYPE_SET;
1885                 /** For SET fill the destination field (field) first. */
1886                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1887                         value, conf->width, dst_width, dev, attr, error);
1888                 /** Then copy immediate value from source as per mask. */
1889                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1890                         value, conf->width, dst_width, dev, attr, error);
1891                 item.spec = &value;
1892         } else {
1893                 type = MLX5_MODIFICATION_TYPE_COPY;
1894                 /** For COPY fill the destination field (dcopy) without mask. */
1895                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1896                         value, conf->width, dst_width, dev, attr, error);
1897                 /** Then construct the source field (field) with mask. */
1898                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1899                         value, conf->width, dst_width, dev, attr, error);
1900         }
1901         item.mask = &mask;
1902         return flow_dv_convert_modify_action(&item,
1903                         field, dcopy, resource, type, error);
1904 }
1905
1906 /**
1907  * Validate MARK item.
1908  *
1909  * @param[in] dev
1910  *   Pointer to the rte_eth_dev structure.
1911  * @param[in] item
1912  *   Item specification.
1913  * @param[in] attr
1914  *   Attributes of flow that includes this item.
1915  * @param[out] error
1916  *   Pointer to error structure.
1917  *
1918  * @return
1919  *   0 on success, a negative errno value otherwise and rte_errno is set.
1920  */
1921 static int
1922 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1923                            const struct rte_flow_item *item,
1924                            const struct rte_flow_attr *attr __rte_unused,
1925                            struct rte_flow_error *error)
1926 {
1927         struct mlx5_priv *priv = dev->data->dev_private;
1928         struct mlx5_dev_config *config = &priv->config;
1929         const struct rte_flow_item_mark *spec = item->spec;
1930         const struct rte_flow_item_mark *mask = item->mask;
1931         const struct rte_flow_item_mark nic_mask = {
1932                 .id = priv->sh->dv_mark_mask,
1933         };
1934         int ret;
1935
1936         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1937                 return rte_flow_error_set(error, ENOTSUP,
1938                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1939                                           "extended metadata feature"
1940                                           " isn't enabled");
1941         if (!mlx5_flow_ext_mreg_supported(dev))
1942                 return rte_flow_error_set(error, ENOTSUP,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1944                                           "extended metadata register"
1945                                           " isn't supported");
1946         if (!nic_mask.id)
1947                 return rte_flow_error_set(error, ENOTSUP,
1948                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1949                                           "extended metadata register"
1950                                           " isn't available");
1951         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1952         if (ret < 0)
1953                 return ret;
1954         if (!spec)
1955                 return rte_flow_error_set(error, EINVAL,
1956                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1957                                           item->spec,
1958                                           "data cannot be empty");
1959         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1960                 return rte_flow_error_set(error, EINVAL,
1961                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1962                                           &spec->id,
1963                                           "mark id exceeds the limit");
1964         if (!mask)
1965                 mask = &nic_mask;
1966         if (!mask->id)
1967                 return rte_flow_error_set(error, EINVAL,
1968                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1969                                         "mask cannot be zero");
1970
1971         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1972                                         (const uint8_t *)&nic_mask,
1973                                         sizeof(struct rte_flow_item_mark),
1974                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1975         if (ret < 0)
1976                 return ret;
1977         return 0;
1978 }
1979
1980 /**
1981  * Validate META item.
1982  *
1983  * @param[in] dev
1984  *   Pointer to the rte_eth_dev structure.
1985  * @param[in] item
1986  *   Item specification.
1987  * @param[in] attr
1988  *   Attributes of flow that includes this item.
1989  * @param[out] error
1990  *   Pointer to error structure.
1991  *
1992  * @return
1993  *   0 on success, a negative errno value otherwise and rte_errno is set.
1994  */
1995 static int
1996 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1997                            const struct rte_flow_item *item,
1998                            const struct rte_flow_attr *attr,
1999                            struct rte_flow_error *error)
2000 {
2001         struct mlx5_priv *priv = dev->data->dev_private;
2002         struct mlx5_dev_config *config = &priv->config;
2003         const struct rte_flow_item_meta *spec = item->spec;
2004         const struct rte_flow_item_meta *mask = item->mask;
2005         struct rte_flow_item_meta nic_mask = {
2006                 .data = UINT32_MAX
2007         };
2008         int reg;
2009         int ret;
2010
2011         if (!spec)
2012                 return rte_flow_error_set(error, EINVAL,
2013                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2014                                           item->spec,
2015                                           "data cannot be empty");
2016         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2017                 if (!mlx5_flow_ext_mreg_supported(dev))
2018                         return rte_flow_error_set(error, ENOTSUP,
2019                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2020                                           "extended metadata register"
2021                                           " isn't supported");
2022                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2023                 if (reg < 0)
2024                         return reg;
2025                 if (reg == REG_NON)
2026                         return rte_flow_error_set(error, ENOTSUP,
2027                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2028                                         "unavalable extended metadata register");
2029                 if (reg == REG_B)
2030                         return rte_flow_error_set(error, ENOTSUP,
2031                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2032                                           "match on reg_b "
2033                                           "isn't supported");
2034                 if (reg != REG_A)
2035                         nic_mask.data = priv->sh->dv_meta_mask;
2036         } else {
2037                 if (attr->transfer)
2038                         return rte_flow_error_set(error, ENOTSUP,
2039                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2040                                         "extended metadata feature "
2041                                         "should be enabled when "
2042                                         "meta item is requested "
2043                                         "with e-switch mode ");
2044                 if (attr->ingress)
2045                         return rte_flow_error_set(error, ENOTSUP,
2046                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2047                                         "match on metadata for ingress "
2048                                         "is not supported in legacy "
2049                                         "metadata mode");
2050         }
2051         if (!mask)
2052                 mask = &rte_flow_item_meta_mask;
2053         if (!mask->data)
2054                 return rte_flow_error_set(error, EINVAL,
2055                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2056                                         "mask cannot be zero");
2057
2058         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2059                                         (const uint8_t *)&nic_mask,
2060                                         sizeof(struct rte_flow_item_meta),
2061                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2062         return ret;
2063 }
2064
2065 /**
2066  * Validate TAG item.
2067  *
2068  * @param[in] dev
2069  *   Pointer to the rte_eth_dev structure.
2070  * @param[in] item
2071  *   Item specification.
2072  * @param[in] attr
2073  *   Attributes of flow that includes this item.
2074  * @param[out] error
2075  *   Pointer to error structure.
2076  *
2077  * @return
2078  *   0 on success, a negative errno value otherwise and rte_errno is set.
2079  */
2080 static int
2081 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2082                           const struct rte_flow_item *item,
2083                           const struct rte_flow_attr *attr __rte_unused,
2084                           struct rte_flow_error *error)
2085 {
2086         const struct rte_flow_item_tag *spec = item->spec;
2087         const struct rte_flow_item_tag *mask = item->mask;
2088         const struct rte_flow_item_tag nic_mask = {
2089                 .data = RTE_BE32(UINT32_MAX),
2090                 .index = 0xff,
2091         };
2092         int ret;
2093
2094         if (!mlx5_flow_ext_mreg_supported(dev))
2095                 return rte_flow_error_set(error, ENOTSUP,
2096                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2097                                           "extensive metadata register"
2098                                           " isn't supported");
2099         if (!spec)
2100                 return rte_flow_error_set(error, EINVAL,
2101                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2102                                           item->spec,
2103                                           "data cannot be empty");
2104         if (!mask)
2105                 mask = &rte_flow_item_tag_mask;
2106         if (!mask->data)
2107                 return rte_flow_error_set(error, EINVAL,
2108                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2109                                         "mask cannot be zero");
2110
2111         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2112                                         (const uint8_t *)&nic_mask,
2113                                         sizeof(struct rte_flow_item_tag),
2114                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2115         if (ret < 0)
2116                 return ret;
2117         if (mask->index != 0xff)
2118                 return rte_flow_error_set(error, EINVAL,
2119                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2120                                           "partial mask for tag index"
2121                                           " is not supported");
2122         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2123         if (ret < 0)
2124                 return ret;
2125         MLX5_ASSERT(ret != REG_NON);
2126         return 0;
2127 }
2128
2129 /**
2130  * Validate vport item.
2131  *
2132  * @param[in] dev
2133  *   Pointer to the rte_eth_dev structure.
2134  * @param[in] item
2135  *   Item specification.
2136  * @param[in] attr
2137  *   Attributes of flow that includes this item.
2138  * @param[in] item_flags
2139  *   Bit-fields that holds the items detected until now.
2140  * @param[out] error
2141  *   Pointer to error structure.
2142  *
2143  * @return
2144  *   0 on success, a negative errno value otherwise and rte_errno is set.
2145  */
2146 static int
2147 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2148                               const struct rte_flow_item *item,
2149                               const struct rte_flow_attr *attr,
2150                               uint64_t item_flags,
2151                               struct rte_flow_error *error)
2152 {
2153         const struct rte_flow_item_port_id *spec = item->spec;
2154         const struct rte_flow_item_port_id *mask = item->mask;
2155         const struct rte_flow_item_port_id switch_mask = {
2156                         .id = 0xffffffff,
2157         };
2158         struct mlx5_priv *esw_priv;
2159         struct mlx5_priv *dev_priv;
2160         int ret;
2161
2162         if (!attr->transfer)
2163                 return rte_flow_error_set(error, EINVAL,
2164                                           RTE_FLOW_ERROR_TYPE_ITEM,
2165                                           NULL,
2166                                           "match on port id is valid only"
2167                                           " when transfer flag is enabled");
2168         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2169                 return rte_flow_error_set(error, ENOTSUP,
2170                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2171                                           "multiple source ports are not"
2172                                           " supported");
2173         if (!mask)
2174                 mask = &switch_mask;
2175         if (mask->id != 0xffffffff)
2176                 return rte_flow_error_set(error, ENOTSUP,
2177                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2178                                            mask,
2179                                            "no support for partial mask on"
2180                                            " \"id\" field");
2181         ret = mlx5_flow_item_acceptable
2182                                 (item, (const uint8_t *)mask,
2183                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2184                                  sizeof(struct rte_flow_item_port_id),
2185                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2186         if (ret)
2187                 return ret;
2188         if (!spec)
2189                 return 0;
2190         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2191         if (!esw_priv)
2192                 return rte_flow_error_set(error, rte_errno,
2193                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2194                                           "failed to obtain E-Switch info for"
2195                                           " port");
2196         dev_priv = mlx5_dev_to_eswitch_info(dev);
2197         if (!dev_priv)
2198                 return rte_flow_error_set(error, rte_errno,
2199                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2200                                           NULL,
2201                                           "failed to obtain E-Switch info");
2202         if (esw_priv->domain_id != dev_priv->domain_id)
2203                 return rte_flow_error_set(error, EINVAL,
2204                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2205                                           "cannot match on a port from a"
2206                                           " different E-Switch");
2207         return 0;
2208 }
2209
2210 /**
2211  * Validate VLAN item.
2212  *
2213  * @param[in] item
2214  *   Item specification.
2215  * @param[in] item_flags
2216  *   Bit-fields that holds the items detected until now.
2217  * @param[in] dev
2218  *   Ethernet device flow is being created on.
2219  * @param[out] error
2220  *   Pointer to error structure.
2221  *
2222  * @return
2223  *   0 on success, a negative errno value otherwise and rte_errno is set.
2224  */
2225 static int
2226 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2227                            uint64_t item_flags,
2228                            struct rte_eth_dev *dev,
2229                            struct rte_flow_error *error)
2230 {
2231         const struct rte_flow_item_vlan *mask = item->mask;
2232         const struct rte_flow_item_vlan nic_mask = {
2233                 .tci = RTE_BE16(UINT16_MAX),
2234                 .inner_type = RTE_BE16(UINT16_MAX),
2235                 .has_more_vlan = 1,
2236         };
2237         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2238         int ret;
2239         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2240                                         MLX5_FLOW_LAYER_INNER_L4) :
2241                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2242                                         MLX5_FLOW_LAYER_OUTER_L4);
2243         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2244                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2245
2246         if (item_flags & vlanm)
2247                 return rte_flow_error_set(error, EINVAL,
2248                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2249                                           "multiple VLAN layers not supported");
2250         else if ((item_flags & l34m) != 0)
2251                 return rte_flow_error_set(error, EINVAL,
2252                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2253                                           "VLAN cannot follow L3/L4 layer");
2254         if (!mask)
2255                 mask = &rte_flow_item_vlan_mask;
2256         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2257                                         (const uint8_t *)&nic_mask,
2258                                         sizeof(struct rte_flow_item_vlan),
2259                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2260         if (ret)
2261                 return ret;
2262         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2263                 struct mlx5_priv *priv = dev->data->dev_private;
2264
2265                 if (priv->vmwa_context) {
2266                         /*
2267                          * Non-NULL context means we have a virtual machine
2268                          * and SR-IOV enabled, we have to create VLAN interface
2269                          * to make hypervisor to setup E-Switch vport
2270                          * context correctly. We avoid creating the multiple
2271                          * VLAN interfaces, so we cannot support VLAN tag mask.
2272                          */
2273                         return rte_flow_error_set(error, EINVAL,
2274                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2275                                                   item,
2276                                                   "VLAN tag mask is not"
2277                                                   " supported in virtual"
2278                                                   " environment");
2279                 }
2280         }
2281         return 0;
2282 }
2283
2284 /*
2285  * GTP flags are contained in 1 byte of the format:
2286  * -------------------------------------------
2287  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2288  * |-----------------------------------------|
2289  * | value | Version | PT | Res | E | S | PN |
2290  * -------------------------------------------
2291  *
2292  * Matching is supported only for GTP flags E, S, PN.
2293  */
2294 #define MLX5_GTP_FLAGS_MASK     0x07
2295
2296 /**
2297  * Validate GTP item.
2298  *
2299  * @param[in] dev
2300  *   Pointer to the rte_eth_dev structure.
2301  * @param[in] item
2302  *   Item specification.
2303  * @param[in] item_flags
2304  *   Bit-fields that holds the items detected until now.
2305  * @param[out] error
2306  *   Pointer to error structure.
2307  *
2308  * @return
2309  *   0 on success, a negative errno value otherwise and rte_errno is set.
2310  */
2311 static int
2312 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2313                           const struct rte_flow_item *item,
2314                           uint64_t item_flags,
2315                           struct rte_flow_error *error)
2316 {
2317         struct mlx5_priv *priv = dev->data->dev_private;
2318         const struct rte_flow_item_gtp *spec = item->spec;
2319         const struct rte_flow_item_gtp *mask = item->mask;
2320         const struct rte_flow_item_gtp nic_mask = {
2321                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2322                 .msg_type = 0xff,
2323                 .teid = RTE_BE32(0xffffffff),
2324         };
2325
2326         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2327                 return rte_flow_error_set(error, ENOTSUP,
2328                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2329                                           "GTP support is not enabled");
2330         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2331                 return rte_flow_error_set(error, ENOTSUP,
2332                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2333                                           "multiple tunnel layers not"
2334                                           " supported");
2335         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2336                 return rte_flow_error_set(error, EINVAL,
2337                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2338                                           "no outer UDP layer found");
2339         if (!mask)
2340                 mask = &rte_flow_item_gtp_mask;
2341         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2342                 return rte_flow_error_set(error, ENOTSUP,
2343                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2344                                           "Match is supported for GTP"
2345                                           " flags only");
2346         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2347                                          (const uint8_t *)&nic_mask,
2348                                          sizeof(struct rte_flow_item_gtp),
2349                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2350 }
2351
2352 /**
2353  * Validate GTP PSC item.
2354  *
2355  * @param[in] item
2356  *   Item specification.
2357  * @param[in] last_item
2358  *   Previous validated item in the pattern items.
2359  * @param[in] gtp_item
2360  *   Previous GTP item specification.
2361  * @param[in] attr
2362  *   Pointer to flow attributes.
2363  * @param[out] error
2364  *   Pointer to error structure.
2365  *
2366  * @return
2367  *   0 on success, a negative errno value otherwise and rte_errno is set.
2368  */
2369 static int
2370 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2371                               uint64_t last_item,
2372                               const struct rte_flow_item *gtp_item,
2373                               const struct rte_flow_attr *attr,
2374                               struct rte_flow_error *error)
2375 {
2376         const struct rte_flow_item_gtp *gtp_spec;
2377         const struct rte_flow_item_gtp *gtp_mask;
2378         const struct rte_flow_item_gtp_psc *spec;
2379         const struct rte_flow_item_gtp_psc *mask;
2380         const struct rte_flow_item_gtp_psc nic_mask = {
2381                 .pdu_type = 0xFF,
2382                 .qfi = 0xFF,
2383         };
2384
2385         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2386                 return rte_flow_error_set
2387                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2388                          "GTP PSC item must be preceded with GTP item");
2389         gtp_spec = gtp_item->spec;
2390         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2391         /* GTP spec and E flag is requested to match zero. */
2392         if (gtp_spec &&
2393                 (gtp_mask->v_pt_rsv_flags &
2394                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2395                 return rte_flow_error_set
2396                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2397                          "GTP E flag must be 1 to match GTP PSC");
2398         /* Check the flow is not created in group zero. */
2399         if (!attr->transfer && !attr->group)
2400                 return rte_flow_error_set
2401                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2402                          "GTP PSC is not supported for group 0");
2403         /* GTP spec is here and E flag is requested to match zero. */
2404         if (!item->spec)
2405                 return 0;
2406         spec = item->spec;
2407         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2408         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2409                 return rte_flow_error_set
2410                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2411                          "PDU type should be smaller than 16");
2412         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2413                                          (const uint8_t *)&nic_mask,
2414                                          sizeof(struct rte_flow_item_gtp_psc),
2415                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2416 }
2417
/**
 * Validate IPV4 item.
 * Use existing validation function mlx5_flow_validate_item_ipv4(), and
 * add specific validation of fragment_offset field,
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   EtherType taken from the preceding L2 item, forwarded to the generic
 *   IPv4 validator for cross-layer consistency checks.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
                           uint64_t item_flags,
                           uint64_t last_item,
                           uint16_t ether_type,
                           struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *last = item->last;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        rte_be16_t fragment_offset_spec = 0;
        rte_be16_t fragment_offset_last = 0;
        /* IPv4 header fields this PMD supports matching on. */
        const struct rte_flow_item_ipv4 nic_ipv4_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(0xffffffff),
                        .dst_addr = RTE_BE32(0xffffffff),
                        .type_of_service = 0xff,
                        .fragment_offset = RTE_BE16(0xffff),
                        .next_proto_id = 0xff,
                        .time_to_live = 0xff,
                },
        };

        /* Generic IPv4 validation first; spec..last ranges are accepted. */
        ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
                                           ether_type, &nic_ipv4_mask,
                                           MLX5_ITEM_RANGE_ACCEPTED, error);
        if (ret < 0)
                return ret;
        /* Effective (masked) fragment_offset match value, big-endian. */
        if (spec && mask)
                fragment_offset_spec = spec->hdr.fragment_offset &
                                       mask->hdr.fragment_offset;
        /* No match requested on fragment_offset - nothing more to check. */
        if (!fragment_offset_spec)
                return 0;
        /*
         * spec and mask are valid, enforce using full mask to make sure the
         * complete value is used correctly.
         */
        if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
                        != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                          item, "must use full mask for"
                                          " fragment_offset");
        /*
         * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
         * indicating this is 1st fragment of fragmented packet.
         * This is not yet supported in MLX5, return appropriate error message.
         */
        if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "match on first fragment not "
                                          "supported");
        /* Any other single (non-range) value is not supported either. */
        if (fragment_offset_spec && !last)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "specified value not supported");
        /* spec and last are valid, validate the specified range. */
        fragment_offset_last = last->hdr.fragment_offset &
                               mask->hdr.fragment_offset;
        /*
         * Match on fragment_offset spec 0x2001 and last 0x3fff
         * means MF is 1 and frag-offset is > 0.
         * This packet is fragment 2nd and onward, excluding last.
         * This is not yet supported in MLX5, return appropriate
         * error message.
         */
        if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
            fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
                                          last, "match on following "
                                          "fragments not supported");
        /*
         * Match on fragment_offset spec 0x0001 and last 0x1fff
         * means MF is 0 and frag-offset is > 0.
         * This packet is last fragment of fragmented packet.
         * This is not yet supported in MLX5, return appropriate
         * error message.
         */
        if (fragment_offset_spec == RTE_BE16(1) &&
            fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
                                          last, "match on last "
                                          "fragment not supported");
        /*
         * Match on fragment_offset spec 0x0001 and last 0x3fff
         * means MF and/or frag-offset is not 0.
         * This is a fragmented packet.
         * Other range values are invalid and rejected.
         */
        if (!(fragment_offset_spec == RTE_BE16(1) &&
              fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
                                          "specified range not supported");
        return 0;
}
2533
/**
 * Validate IPV6 fragment extension item.
 *
 * Only a specific set of frag_data spec/last combinations (matching "any
 * fragment") is supported; first/following/last fragment matches are
 * explicitly rejected.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
                                    uint64_t item_flags,
                                    struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
        const struct rte_flow_item_ipv6_frag_ext *last = item->last;
        const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
        rte_be16_t frag_data_spec = 0;
        rte_be16_t frag_data_last = 0;
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret = 0;
        /* Fragment extension fields this PMD supports matching on. */
        struct rte_flow_item_ipv6_frag_ext nic_mask = {
                .hdr = {
                        .next_header = 0xff,
                        .frag_data = RTE_BE16(0xffff),
                },
        };

        /* The extension header is part of L3 and cannot come after L4. */
        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "ipv6 fragment extension item cannot "
                                          "follow L4 item.");
        /* It must directly extend an IPv6 header at the same level. */
        if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
            (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "ipv6 fragment extension item must "
                                          "follow ipv6 item");
        /* Effective (masked) frag_data match value, big-endian. */
        if (spec && mask)
                frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
        /* No match requested on frag_data - nothing more to check. */
        if (!frag_data_spec)
                return 0;
        /*
         * spec and mask are valid, enforce using full mask to make sure the
         * complete value is used correctly.
         */
        if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
                                RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                          item, "must use full mask for"
                                          " frag_data");
        /*
         * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
         * This is 1st fragment of fragmented packet.
         */
        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "match on first fragment not "
                                          "supported");
        /* Any other single (non-range) value is not supported either. */
        if (frag_data_spec && !last)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "specified value not supported");
        ret = mlx5_flow_item_acceptable
                                (item, (const uint8_t *)mask,
                                 (const uint8_t *)&nic_mask,
                                 sizeof(struct rte_flow_item_ipv6_frag_ext),
                                 MLX5_ITEM_RANGE_ACCEPTED, error);
        if (ret)
                return ret;
        /* spec and last are valid, validate the specified range. */
        frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
        /*
         * Match on frag_data spec 0x0009 and last 0xfff9
         * means M is 1 and frag-offset is > 0.
         * This packet is fragment 2nd and onward, excluding last.
         * This is not yet supported in MLX5, return appropriate
         * error message.
         */
        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
                                       RTE_IPV6_EHDR_MF_MASK) &&
            frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
                                          last, "match on following "
                                          "fragments not supported");
        /*
         * Match on frag_data spec 0x0008 and last 0xfff8
         * means M is 0 and frag-offset is > 0.
         * This packet is last fragment of fragmented packet.
         * This is not yet supported in MLX5, return appropriate
         * error message.
         */
        if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
            frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST,
                                          last, "match on last "
                                          "fragment not supported");
        /* Other range values are invalid and rejected. */
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
                                  "specified range not supported");
}
2647
2648 /*
2649  * Validate ASO CT item.
2650  *
2651  * @param[in] dev
2652  *   Pointer to the rte_eth_dev structure.
2653  * @param[in] item
2654  *   Item specification.
2655  * @param[in] item_flags
2656  *   Pointer to bit-fields that holds the items detected until now.
2657  * @param[out] error
2658  *   Pointer to error structure.
2659  *
2660  * @return
2661  *   0 on success, a negative errno value otherwise and rte_errno is set.
2662  */
2663 static int
2664 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2665                              const struct rte_flow_item *item,
2666                              uint64_t *item_flags,
2667                              struct rte_flow_error *error)
2668 {
2669         const struct rte_flow_item_conntrack *spec = item->spec;
2670         const struct rte_flow_item_conntrack *mask = item->mask;
2671         RTE_SET_USED(dev);
2672         uint32_t flags;
2673
2674         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2675                 return rte_flow_error_set(error, EINVAL,
2676                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2677                                           "Only one CT is supported");
2678         if (!mask)
2679                 mask = &rte_flow_item_conntrack_mask;
2680         flags = spec->flags & mask->flags;
2681         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2682             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2683              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2684              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2685                 return rte_flow_error_set(error, EINVAL,
2686                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2687                                           "Conflict status bits");
2688         /* State change also needs to be considered. */
2689         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2690         return 0;
2691 }
2692
2693 /**
2694  * Validate the pop VLAN action.
2695  *
2696  * @param[in] dev
2697  *   Pointer to the rte_eth_dev structure.
2698  * @param[in] action_flags
2699  *   Holds the actions detected until now.
2700  * @param[in] action
2701  *   Pointer to the pop vlan action.
2702  * @param[in] item_flags
2703  *   The items found in this flow rule.
2704  * @param[in] attr
2705  *   Pointer to flow attributes.
2706  * @param[out] error
2707  *   Pointer to error structure.
2708  *
2709  * @return
2710  *   0 on success, a negative errno value otherwise and rte_errno is set.
2711  */
2712 static int
2713 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2714                                  uint64_t action_flags,
2715                                  const struct rte_flow_action *action,
2716                                  uint64_t item_flags,
2717                                  const struct rte_flow_attr *attr,
2718                                  struct rte_flow_error *error)
2719 {
2720         const struct mlx5_priv *priv = dev->data->dev_private;
2721
2722         (void)action;
2723         (void)attr;
2724         if (!priv->sh->pop_vlan_action)
2725                 return rte_flow_error_set(error, ENOTSUP,
2726                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2727                                           NULL,
2728                                           "pop vlan action is not supported");
2729         if (attr->egress)
2730                 return rte_flow_error_set(error, ENOTSUP,
2731                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2732                                           NULL,
2733                                           "pop vlan action not supported for "
2734                                           "egress");
2735         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2736                 return rte_flow_error_set(error, ENOTSUP,
2737                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2738                                           "no support for multiple VLAN "
2739                                           "actions");
2740         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2741         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2742             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2743                 return rte_flow_error_set(error, ENOTSUP,
2744                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2745                                           NULL,
2746                                           "cannot pop vlan after decap without "
2747                                           "match on inner vlan in the flow");
2748         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2749         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2750             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2751                 return rte_flow_error_set(error, ENOTSUP,
2752                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2753                                           NULL,
2754                                           "cannot pop vlan without a "
2755                                           "match on (outer) vlan in the flow");
2756         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2757                 return rte_flow_error_set(error, EINVAL,
2758                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2759                                           "wrong action order, port_id should "
2760                                           "be after pop VLAN action");
2761         if (!attr->transfer && priv->representor)
2762                 return rte_flow_error_set(error, ENOTSUP,
2763                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2764                                           "pop vlan action for VF representor "
2765                                           "not supported on NIC table");
2766         return 0;
2767 }
2768
2769 /**
2770  * Get VLAN default info from vlan match info.
2771  *
2772  * @param[in] items
2773  *   the list of item specifications.
2774  * @param[out] vlan
2775  *   pointer VLAN info to fill to.
2776  *
2777  * @return
2778  *   0 on success, a negative errno value otherwise and rte_errno is set.
2779  */
2780 static void
2781 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2782                                   struct rte_vlan_hdr *vlan)
2783 {
2784         const struct rte_flow_item_vlan nic_mask = {
2785                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2786                                 MLX5DV_FLOW_VLAN_VID_MASK),
2787                 .inner_type = RTE_BE16(0xffff),
2788         };
2789
2790         if (items == NULL)
2791                 return;
2792         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2793                 int type = items->type;
2794
2795                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2796                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2797                         break;
2798         }
2799         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2800                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2801                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2802
2803                 /* If VLAN item in pattern doesn't contain data, return here. */
2804                 if (!vlan_v)
2805                         return;
2806                 if (!vlan_m)
2807                         vlan_m = &nic_mask;
2808                 /* Only full match values are accepted */
2809                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2810                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2811                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2812                         vlan->vlan_tci |=
2813                                 rte_be_to_cpu_16(vlan_v->tci &
2814                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2815                 }
2816                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2817                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2818                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2819                         vlan->vlan_tci |=
2820                                 rte_be_to_cpu_16(vlan_v->tci &
2821                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2822                 }
2823                 if (vlan_m->inner_type == nic_mask.inner_type)
2824                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2825                                                            vlan_m->inner_type);
2826         }
2827 }
2828
2829 /**
2830  * Validate the push VLAN action.
2831  *
2832  * @param[in] dev
2833  *   Pointer to the rte_eth_dev structure.
2834  * @param[in] action_flags
2835  *   Holds the actions detected until now.
2836  * @param[in] item_flags
2837  *   The items found in this flow rule.
2838  * @param[in] action
2839  *   Pointer to the action structure.
2840  * @param[in] attr
2841  *   Pointer to flow attributes
2842  * @param[out] error
2843  *   Pointer to error structure.
2844  *
2845  * @return
2846  *   0 on success, a negative errno value otherwise and rte_errno is set.
2847  */
2848 static int
2849 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2850                                   uint64_t action_flags,
2851                                   const struct rte_flow_item_vlan *vlan_m,
2852                                   const struct rte_flow_action *action,
2853                                   const struct rte_flow_attr *attr,
2854                                   struct rte_flow_error *error)
2855 {
2856         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2857         const struct mlx5_priv *priv = dev->data->dev_private;
2858
2859         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2860             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2861                 return rte_flow_error_set(error, EINVAL,
2862                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2863                                           "invalid vlan ethertype");
2864         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2865                 return rte_flow_error_set(error, EINVAL,
2866                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2867                                           "wrong action order, port_id should "
2868                                           "be after push VLAN");
2869         if (!attr->transfer && priv->representor)
2870                 return rte_flow_error_set(error, ENOTSUP,
2871                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2872                                           "push vlan action for VF representor "
2873                                           "not supported on NIC table");
2874         if (vlan_m &&
2875             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2876             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2877                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2878             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2879             !(mlx5_flow_find_action
2880                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2881                 return rte_flow_error_set(error, EINVAL,
2882                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2883                                           "not full match mask on VLAN PCP and "
2884                                           "there is no of_set_vlan_pcp action, "
2885                                           "push VLAN action cannot figure out "
2886                                           "PCP value");
2887         if (vlan_m &&
2888             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2889             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2890                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2891             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2892             !(mlx5_flow_find_action
2893                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2894                 return rte_flow_error_set(error, EINVAL,
2895                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2896                                           "not full match mask on VLAN VID and "
2897                                           "there is no of_set_vlan_vid action, "
2898                                           "push VLAN action cannot figure out "
2899                                           "VID value");
2900         (void)attr;
2901         return 0;
2902 }
2903
2904 /**
2905  * Validate the set VLAN PCP.
2906  *
2907  * @param[in] action_flags
2908  *   Holds the actions detected until now.
2909  * @param[in] actions
2910  *   Pointer to the list of actions remaining in the flow rule.
2911  * @param[out] error
2912  *   Pointer to error structure.
2913  *
2914  * @return
2915  *   0 on success, a negative errno value otherwise and rte_errno is set.
2916  */
2917 static int
2918 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2919                                      const struct rte_flow_action actions[],
2920                                      struct rte_flow_error *error)
2921 {
2922         const struct rte_flow_action *action = actions;
2923         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2924
2925         if (conf->vlan_pcp > 7)
2926                 return rte_flow_error_set(error, EINVAL,
2927                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2928                                           "VLAN PCP value is too big");
2929         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2930                 return rte_flow_error_set(error, ENOTSUP,
2931                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2932                                           "set VLAN PCP action must follow "
2933                                           "the push VLAN action");
2934         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2935                 return rte_flow_error_set(error, ENOTSUP,
2936                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2937                                           "Multiple VLAN PCP modification are "
2938                                           "not supported");
2939         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2940                 return rte_flow_error_set(error, EINVAL,
2941                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2942                                           "wrong action order, port_id should "
2943                                           "be after set VLAN PCP");
2944         return 0;
2945 }
2946
2947 /**
2948  * Validate the set VLAN VID.
2949  *
2950  * @param[in] item_flags
2951  *   Holds the items detected in this rule.
2952  * @param[in] action_flags
2953  *   Holds the actions detected until now.
2954  * @param[in] actions
2955  *   Pointer to the list of actions remaining in the flow rule.
2956  * @param[out] error
2957  *   Pointer to error structure.
2958  *
2959  * @return
2960  *   0 on success, a negative errno value otherwise and rte_errno is set.
2961  */
2962 static int
2963 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2964                                      uint64_t action_flags,
2965                                      const struct rte_flow_action actions[],
2966                                      struct rte_flow_error *error)
2967 {
2968         const struct rte_flow_action *action = actions;
2969         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2970
2971         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2972                 return rte_flow_error_set(error, EINVAL,
2973                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2974                                           "VLAN VID value is too big");
2975         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2976             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2977                 return rte_flow_error_set(error, ENOTSUP,
2978                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2979                                           "set VLAN VID action must follow push"
2980                                           " VLAN action or match on VLAN item");
2981         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2982                 return rte_flow_error_set(error, ENOTSUP,
2983                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2984                                           "Multiple VLAN VID modifications are "
2985                                           "not supported");
2986         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2987                 return rte_flow_error_set(error, EINVAL,
2988                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2989                                           "wrong action order, port_id should "
2990                                           "be after set VLAN VID");
2991         return 0;
2992 }
2993
2994 /*
2995  * Validate the FLAG action.
2996  *
2997  * @param[in] dev
2998  *   Pointer to the rte_eth_dev structure.
2999  * @param[in] action_flags
3000  *   Holds the actions detected until now.
3001  * @param[in] attr
3002  *   Pointer to flow attributes
3003  * @param[out] error
3004  *   Pointer to error structure.
3005  *
3006  * @return
3007  *   0 on success, a negative errno value otherwise and rte_errno is set.
3008  */
3009 static int
3010 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3011                              uint64_t action_flags,
3012                              const struct rte_flow_attr *attr,
3013                              struct rte_flow_error *error)
3014 {
3015         struct mlx5_priv *priv = dev->data->dev_private;
3016         struct mlx5_dev_config *config = &priv->config;
3017         int ret;
3018
3019         /* Fall back if no extended metadata register support. */
3020         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3021                 return mlx5_flow_validate_action_flag(action_flags, attr,
3022                                                       error);
3023         /* Extensive metadata mode requires registers. */
3024         if (!mlx5_flow_ext_mreg_supported(dev))
3025                 return rte_flow_error_set(error, ENOTSUP,
3026                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3027                                           "no metadata registers "
3028                                           "to support flag action");
3029         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3030                 return rte_flow_error_set(error, ENOTSUP,
3031                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3032                                           "extended metadata register"
3033                                           " isn't available");
3034         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3035         if (ret < 0)
3036                 return ret;
3037         MLX5_ASSERT(ret > 0);
3038         if (action_flags & MLX5_FLOW_ACTION_MARK)
3039                 return rte_flow_error_set(error, EINVAL,
3040                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3041                                           "can't mark and flag in same flow");
3042         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3043                 return rte_flow_error_set(error, EINVAL,
3044                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3045                                           "can't have 2 flag"
3046                                           " actions in same flow");
3047         return 0;
3048 }
3049
3050 /**
3051  * Validate MARK action.
3052  *
3053  * @param[in] dev
3054  *   Pointer to the rte_eth_dev structure.
3055  * @param[in] action
3056  *   Pointer to action.
3057  * @param[in] action_flags
3058  *   Holds the actions detected until now.
3059  * @param[in] attr
3060  *   Pointer to flow attributes
3061  * @param[out] error
3062  *   Pointer to error structure.
3063  *
3064  * @return
3065  *   0 on success, a negative errno value otherwise and rte_errno is set.
3066  */
3067 static int
3068 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3069                              const struct rte_flow_action *action,
3070                              uint64_t action_flags,
3071                              const struct rte_flow_attr *attr,
3072                              struct rte_flow_error *error)
3073 {
3074         struct mlx5_priv *priv = dev->data->dev_private;
3075         struct mlx5_dev_config *config = &priv->config;
3076         const struct rte_flow_action_mark *mark = action->conf;
3077         int ret;
3078
3079         if (is_tunnel_offload_active(dev))
3080                 return rte_flow_error_set(error, ENOTSUP,
3081                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3082                                           "no mark action "
3083                                           "if tunnel offload active");
3084         /* Fall back if no extended metadata register support. */
3085         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3086                 return mlx5_flow_validate_action_mark(action, action_flags,
3087                                                       attr, error);
3088         /* Extensive metadata mode requires registers. */
3089         if (!mlx5_flow_ext_mreg_supported(dev))
3090                 return rte_flow_error_set(error, ENOTSUP,
3091                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3092                                           "no metadata registers "
3093                                           "to support mark action");
3094         if (!priv->sh->dv_mark_mask)
3095                 return rte_flow_error_set(error, ENOTSUP,
3096                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3097                                           "extended metadata register"
3098                                           " isn't available");
3099         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3100         if (ret < 0)
3101                 return ret;
3102         MLX5_ASSERT(ret > 0);
3103         if (!mark)
3104                 return rte_flow_error_set(error, EINVAL,
3105                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3106                                           "configuration cannot be null");
3107         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3108                 return rte_flow_error_set(error, EINVAL,
3109                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3110                                           &mark->id,
3111                                           "mark id exceeds the limit");
3112         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3113                 return rte_flow_error_set(error, EINVAL,
3114                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3115                                           "can't flag and mark in same flow");
3116         if (action_flags & MLX5_FLOW_ACTION_MARK)
3117                 return rte_flow_error_set(error, EINVAL,
3118                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3119                                           "can't have 2 mark actions in same"
3120                                           " flow");
3121         return 0;
3122 }
3123
3124 /**
3125  * Validate SET_META action.
3126  *
3127  * @param[in] dev
3128  *   Pointer to the rte_eth_dev structure.
3129  * @param[in] action
3130  *   Pointer to the action structure.
3131  * @param[in] action_flags
3132  *   Holds the actions detected until now.
3133  * @param[in] attr
3134  *   Pointer to flow attributes
3135  * @param[out] error
3136  *   Pointer to error structure.
3137  *
3138  * @return
3139  *   0 on success, a negative errno value otherwise and rte_errno is set.
3140  */
3141 static int
3142 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3143                                  const struct rte_flow_action *action,
3144                                  uint64_t action_flags __rte_unused,
3145                                  const struct rte_flow_attr *attr,
3146                                  struct rte_flow_error *error)
3147 {
3148         const struct rte_flow_action_set_meta *conf;
3149         uint32_t nic_mask = UINT32_MAX;
3150         int reg;
3151
3152         if (!mlx5_flow_ext_mreg_supported(dev))
3153                 return rte_flow_error_set(error, ENOTSUP,
3154                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3155                                           "extended metadata register"
3156                                           " isn't supported");
3157         reg = flow_dv_get_metadata_reg(dev, attr, error);
3158         if (reg < 0)
3159                 return reg;
3160         if (reg == REG_NON)
3161                 return rte_flow_error_set(error, ENOTSUP,
3162                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3163                                           "unavalable extended metadata register");
3164         if (reg != REG_A && reg != REG_B) {
3165                 struct mlx5_priv *priv = dev->data->dev_private;
3166
3167                 nic_mask = priv->sh->dv_meta_mask;
3168         }
3169         if (!(action->conf))
3170                 return rte_flow_error_set(error, EINVAL,
3171                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3172                                           "configuration cannot be null");
3173         conf = (const struct rte_flow_action_set_meta *)action->conf;
3174         if (!conf->mask)
3175                 return rte_flow_error_set(error, EINVAL,
3176                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3177                                           "zero mask doesn't have any effect");
3178         if (conf->mask & ~nic_mask)
3179                 return rte_flow_error_set(error, EINVAL,
3180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3181                                           "meta data must be within reg C0");
3182         return 0;
3183 }
3184
3185 /**
3186  * Validate SET_TAG action.
3187  *
3188  * @param[in] dev
3189  *   Pointer to the rte_eth_dev structure.
3190  * @param[in] action
3191  *   Pointer to the action structure.
3192  * @param[in] action_flags
3193  *   Holds the actions detected until now.
3194  * @param[in] attr
3195  *   Pointer to flow attributes
3196  * @param[out] error
3197  *   Pointer to error structure.
3198  *
3199  * @return
3200  *   0 on success, a negative errno value otherwise and rte_errno is set.
3201  */
3202 static int
3203 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3204                                 const struct rte_flow_action *action,
3205                                 uint64_t action_flags,
3206                                 const struct rte_flow_attr *attr,
3207                                 struct rte_flow_error *error)
3208 {
3209         const struct rte_flow_action_set_tag *conf;
3210         const uint64_t terminal_action_flags =
3211                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3212                 MLX5_FLOW_ACTION_RSS;
3213         int ret;
3214
3215         if (!mlx5_flow_ext_mreg_supported(dev))
3216                 return rte_flow_error_set(error, ENOTSUP,
3217                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3218                                           "extensive metadata register"
3219                                           " isn't supported");
3220         if (!(action->conf))
3221                 return rte_flow_error_set(error, EINVAL,
3222                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3223                                           "configuration cannot be null");
3224         conf = (const struct rte_flow_action_set_tag *)action->conf;
3225         if (!conf->mask)
3226                 return rte_flow_error_set(error, EINVAL,
3227                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3228                                           "zero mask doesn't have any effect");
3229         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3230         if (ret < 0)
3231                 return ret;
3232         if (!attr->transfer && attr->ingress &&
3233             (action_flags & terminal_action_flags))
3234                 return rte_flow_error_set(error, EINVAL,
3235                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3236                                           "set_tag has no effect"
3237                                           " with terminal actions");
3238         return 0;
3239 }
3240
3241 /**
3242  * Check if action counter is shared by either old or new mechanism.
3243  *
3244  * @param[in] action
3245  *   Pointer to the action structure.
3246  *
3247  * @return
3248  *   True when counter is shared, false otherwise.
3249  */
3250 static inline bool
3251 is_shared_action_count(const struct rte_flow_action *action)
3252 {
3253         const struct rte_flow_action_count *count =
3254                         (const struct rte_flow_action_count *)action->conf;
3255
3256         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3257                 return true;
3258         return !!(count && count->shared);
3259 }
3260
/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] shared
 *   Indicator if action is shared.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
			      uint64_t action_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Flow counters require DevX support. */
	if (!priv->config.devx)
		goto notsup_err;
	if (action_flags & MLX5_FLOW_ACTION_COUNT)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "duplicate count actions set");
	/* Legacy (non-ASO) aging cannot be combined with a shared counter. */
	if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
	    !priv->sh->flow_hit_aso_en)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "old age and shared count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
	/*
	 * Without DevX counter support in rdma-core the success return above
	 * is compiled out and control falls through to the error path.
	 */
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
3304
3305 /**
3306  * Validate the L2 encap action.
3307  *
3308  * @param[in] dev
3309  *   Pointer to the rte_eth_dev structure.
3310  * @param[in] action_flags
3311  *   Holds the actions detected until now.
3312  * @param[in] action
3313  *   Pointer to the action structure.
3314  * @param[in] attr
3315  *   Pointer to flow attributes.
3316  * @param[out] error
3317  *   Pointer to error structure.
3318  *
3319  * @return
3320  *   0 on success, a negative errno value otherwise and rte_errno is set.
3321  */
3322 static int
3323 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3324                                  uint64_t action_flags,
3325                                  const struct rte_flow_action *action,
3326                                  const struct rte_flow_attr *attr,
3327                                  struct rte_flow_error *error)
3328 {
3329         const struct mlx5_priv *priv = dev->data->dev_private;
3330
3331         if (!(action->conf))
3332                 return rte_flow_error_set(error, EINVAL,
3333                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3334                                           "configuration cannot be null");
3335         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3336                 return rte_flow_error_set(error, EINVAL,
3337                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3338                                           "can only have a single encap action "
3339                                           "in a flow");
3340         if (!attr->transfer && priv->representor)
3341                 return rte_flow_error_set(error, ENOTSUP,
3342                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3343                                           "encap action for VF representor "
3344                                           "not supported on NIC table");
3345         return 0;
3346 }
3347
3348 /**
3349  * Validate a decap action.
3350  *
3351  * @param[in] dev
3352  *   Pointer to the rte_eth_dev structure.
3353  * @param[in] action_flags
3354  *   Holds the actions detected until now.
3355  * @param[in] action
3356  *   Pointer to the action structure.
3357  * @param[in] item_flags
3358  *   Holds the items detected.
3359  * @param[in] attr
3360  *   Pointer to flow attributes
3361  * @param[out] error
3362  *   Pointer to error structure.
3363  *
3364  * @return
3365  *   0 on success, a negative errno value otherwise and rte_errno is set.
3366  */
3367 static int
3368 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3369                               uint64_t action_flags,
3370                               const struct rte_flow_action *action,
3371                               const uint64_t item_flags,
3372                               const struct rte_flow_attr *attr,
3373                               struct rte_flow_error *error)
3374 {
3375         const struct mlx5_priv *priv = dev->data->dev_private;
3376
3377         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3378             !priv->config.decap_en)
3379                 return rte_flow_error_set(error, ENOTSUP,
3380                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3381                                           "decap is not enabled");
3382         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3383                 return rte_flow_error_set(error, ENOTSUP,
3384                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3385                                           action_flags &
3386                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3387                                           "have a single decap action" : "decap "
3388                                           "after encap is not supported");
3389         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3390                 return rte_flow_error_set(error, EINVAL,
3391                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3392                                           "can't have decap action after"
3393                                           " modify action");
3394         if (attr->egress)
3395                 return rte_flow_error_set(error, ENOTSUP,
3396                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3397                                           NULL,
3398                                           "decap action not supported for "
3399                                           "egress");
3400         if (!attr->transfer && priv->representor)
3401                 return rte_flow_error_set(error, ENOTSUP,
3402                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3403                                           "decap action for VF representor "
3404                                           "not supported on NIC table");
3405         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3406             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3407                 return rte_flow_error_set(error, ENOTSUP,
3408                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3409                                 "VXLAN item should be present for VXLAN decap");
3410         return 0;
3411 }
3412
3413 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3414
3415 /**
3416  * Validate the raw encap and decap actions.
3417  *
3418  * @param[in] dev
3419  *   Pointer to the rte_eth_dev structure.
3420  * @param[in] decap
3421  *   Pointer to the decap action.
3422  * @param[in] encap
3423  *   Pointer to the encap action.
3424  * @param[in] attr
3425  *   Pointer to flow attributes
3426  * @param[in/out] action_flags
3427  *   Holds the actions detected until now.
3428  * @param[out] actions_n
3429  *   pointer to the number of actions counter.
3430  * @param[in] action
3431  *   Pointer to the action structure.
3432  * @param[in] item_flags
3433  *   Holds the items detected.
3434  * @param[out] error
3435  *   Pointer to error structure.
3436  *
3437  * @return
3438  *   0 on success, a negative errno value otherwise and rte_errno is set.
3439  */
3440 static int
3441 flow_dv_validate_action_raw_encap_decap
3442         (struct rte_eth_dev *dev,
3443          const struct rte_flow_action_raw_decap *decap,
3444          const struct rte_flow_action_raw_encap *encap,
3445          const struct rte_flow_attr *attr, uint64_t *action_flags,
3446          int *actions_n, const struct rte_flow_action *action,
3447          uint64_t item_flags, struct rte_flow_error *error)
3448 {
3449         const struct mlx5_priv *priv = dev->data->dev_private;
3450         int ret;
3451
3452         if (encap && (!encap->size || !encap->data))
3453                 return rte_flow_error_set(error, EINVAL,
3454                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3455                                           "raw encap data cannot be empty");
3456         if (decap && encap) {
3457                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3458                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3459                         /* L3 encap. */
3460                         decap = NULL;
3461                 else if (encap->size <=
3462                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3463                            decap->size >
3464                            MLX5_ENCAPSULATION_DECISION_SIZE)
3465                         /* L3 decap. */
3466                         encap = NULL;
3467                 else if (encap->size >
3468                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3469                            decap->size >
3470                            MLX5_ENCAPSULATION_DECISION_SIZE)
3471                         /* 2 L2 actions: encap and decap. */
3472                         ;
3473                 else
3474                         return rte_flow_error_set(error,
3475                                 ENOTSUP,
3476                                 RTE_FLOW_ERROR_TYPE_ACTION,
3477                                 NULL, "unsupported too small "
3478                                 "raw decap and too small raw "
3479                                 "encap combination");
3480         }
3481         if (decap) {
3482                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3483                                                     item_flags, attr, error);
3484                 if (ret < 0)
3485                         return ret;
3486                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3487                 ++(*actions_n);
3488         }
3489         if (encap) {
3490                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3491                         return rte_flow_error_set(error, ENOTSUP,
3492                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3493                                                   NULL,
3494                                                   "small raw encap size");
3495                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3496                         return rte_flow_error_set(error, EINVAL,
3497                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3498                                                   NULL,
3499                                                   "more than one encap action");
3500                 if (!attr->transfer && priv->representor)
3501                         return rte_flow_error_set
3502                                         (error, ENOTSUP,
3503                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3504                                          "encap action for VF representor "
3505                                          "not supported on NIC table");
3506                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3507                 ++(*actions_n);
3508         }
3509         return 0;
3510 }
3511
3512 /*
3513  * Validate the ASO CT action.
3514  *
3515  * @param[in] dev
3516  *   Pointer to the rte_eth_dev structure.
3517  * @param[in] action_flags
3518  *   Holds the actions detected until now.
3519  * @param[in] item_flags
3520  *   The items found in this flow rule.
3521  * @param[in] attr
3522  *   Pointer to flow attributes.
3523  * @param[out] error
3524  *   Pointer to error structure.
3525  *
3526  * @return
3527  *   0 on success, a negative errno value otherwise and rte_errno is set.
3528  */
3529 static int
3530 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3531                                uint64_t action_flags,
3532                                uint64_t item_flags,
3533                                const struct rte_flow_attr *attr,
3534                                struct rte_flow_error *error)
3535 {
3536         RTE_SET_USED(dev);
3537
3538         if (attr->group == 0 && !attr->transfer)
3539                 return rte_flow_error_set(error, ENOTSUP,
3540                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3541                                           NULL,
3542                                           "Only support non-root table");
3543         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3544                 return rte_flow_error_set(error, ENOTSUP,
3545                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3546                                           "CT cannot follow a fate action");
3547         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3548             (action_flags & MLX5_FLOW_ACTION_AGE))
3549                 return rte_flow_error_set(error, EINVAL,
3550                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3551                                           "Only one ASO action is supported");
3552         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3553                 return rte_flow_error_set(error, EINVAL,
3554                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3555                                           "Encap cannot exist before CT");
3556         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3557                 return rte_flow_error_set(error, EINVAL,
3558                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3559                                           "Not a outer TCP packet");
3560         return 0;
3561 }
3562
3563 /**
3564  * Match encap_decap resource.
3565  *
3566  * @param list
3567  *   Pointer to the hash list.
3568  * @param entry
3569  *   Pointer to exist resource entry object.
3570  * @param key
3571  *   Key of the new entry.
3572  * @param ctx_cb
3573  *   Pointer to new encap_decap resource.
3574  *
3575  * @return
3576  *   0 on matching, none-zero otherwise.
3577  */
3578 int
3579 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3580                              struct mlx5_hlist_entry *entry,
3581                              uint64_t key __rte_unused, void *cb_ctx)
3582 {
3583         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3584         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3585         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3586
3587         cache_resource = container_of(entry,
3588                                       struct mlx5_flow_dv_encap_decap_resource,
3589                                       entry);
3590         if (resource->reformat_type == cache_resource->reformat_type &&
3591             resource->ft_type == cache_resource->ft_type &&
3592             resource->flags == cache_resource->flags &&
3593             resource->size == cache_resource->size &&
3594             !memcmp((const void *)resource->buf,
3595                     (const void *)cache_resource->buf,
3596                     resource->size))
3597                 return 0;
3598         return -1;
3599 }
3600
3601 /**
3602  * Allocate encap_decap resource.
3603  *
3604  * @param list
3605  *   Pointer to the hash list.
3606  * @param entry
3607  *   Pointer to exist resource entry object.
3608  * @param ctx_cb
3609  *   Pointer to new encap_decap resource.
3610  *
3611  * @return
3612  *   0 on matching, none-zero otherwise.
3613  */
3614 struct mlx5_hlist_entry *
3615 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3616                               uint64_t key __rte_unused,
3617                               void *cb_ctx)
3618 {
3619         struct mlx5_dev_ctx_shared *sh = list->ctx;
3620         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3621         struct mlx5dv_dr_domain *domain;
3622         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3623         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3624         uint32_t idx;
3625         int ret;
3626
3627         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3628                 domain = sh->fdb_domain;
3629         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3630                 domain = sh->rx_domain;
3631         else
3632                 domain = sh->tx_domain;
3633         /* Register new encap/decap resource. */
3634         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3635                                        &idx);
3636         if (!cache_resource) {
3637                 rte_flow_error_set(ctx->error, ENOMEM,
3638                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3639                                    "cannot allocate resource memory");
3640                 return NULL;
3641         }
3642         *cache_resource = *resource;
3643         cache_resource->idx = idx;
3644         ret = mlx5_flow_os_create_flow_action_packet_reformat
3645                                         (sh->ctx, domain, cache_resource,
3646                                          &cache_resource->action);
3647         if (ret) {
3648                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3649                 rte_flow_error_set(ctx->error, ENOMEM,
3650                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3651                                    NULL, "cannot create action");
3652                 return NULL;
3653         }
3654
3655         return &cache_resource->entry;
3656 }
3657
3658 /**
3659  * Find existing encap/decap resource or create and register a new one.
3660  *
3661  * @param[in, out] dev
3662  *   Pointer to rte_eth_dev structure.
3663  * @param[in, out] resource
3664  *   Pointer to encap/decap resource.
3665  * @parm[in, out] dev_flow
3666  *   Pointer to the dev_flow.
3667  * @param[out] error
3668  *   pointer to error structure.
3669  *
3670  * @return
3671  *   0 on success otherwise -errno and errno is set.
3672  */
3673 static int
3674 flow_dv_encap_decap_resource_register
3675                         (struct rte_eth_dev *dev,
3676                          struct mlx5_flow_dv_encap_decap_resource *resource,
3677                          struct mlx5_flow *dev_flow,
3678                          struct rte_flow_error *error)
3679 {
3680         struct mlx5_priv *priv = dev->data->dev_private;
3681         struct mlx5_dev_ctx_shared *sh = priv->sh;
3682         struct mlx5_hlist_entry *entry;
3683         union {
3684                 struct {
3685                         uint32_t ft_type:8;
3686                         uint32_t refmt_type:8;
3687                         /*
3688                          * Header reformat actions can be shared between
3689                          * non-root tables. One bit to indicate non-root
3690                          * table or not.
3691                          */
3692                         uint32_t is_root:1;
3693                         uint32_t reserve:15;
3694                 };
3695                 uint32_t v32;
3696         } encap_decap_key = {
3697                 {
3698                         .ft_type = resource->ft_type,
3699                         .refmt_type = resource->reformat_type,
3700                         .is_root = !!dev_flow->dv.group,
3701                         .reserve = 0,
3702                 }
3703         };
3704         struct mlx5_flow_cb_ctx ctx = {
3705                 .error = error,
3706                 .data = resource,
3707         };
3708         uint64_t key64;
3709
3710         resource->flags = dev_flow->dv.group ? 0 : 1;
3711         key64 =  __rte_raw_cksum(&encap_decap_key.v32,
3712                                  sizeof(encap_decap_key.v32), 0);
3713         if (resource->reformat_type !=
3714             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3715             resource->size)
3716                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3717         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3718         if (!entry)
3719                 return -rte_errno;
3720         resource = container_of(entry, typeof(*resource), entry);
3721         dev_flow->dv.encap_decap = resource;
3722         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3723         return 0;
3724 }
3725
3726 /**
3727  * Find existing table jump resource or create and register a new one.
3728  *
3729  * @param[in, out] dev
3730  *   Pointer to rte_eth_dev structure.
3731  * @param[in, out] tbl
3732  *   Pointer to flow table resource.
3733  * @parm[in, out] dev_flow
3734  *   Pointer to the dev_flow.
3735  * @param[out] error
3736  *   pointer to error structure.
3737  *
3738  * @return
3739  *   0 on success otherwise -errno and errno is set.
3740  */
3741 static int
3742 flow_dv_jump_tbl_resource_register
3743                         (struct rte_eth_dev *dev __rte_unused,
3744                          struct mlx5_flow_tbl_resource *tbl,
3745                          struct mlx5_flow *dev_flow,
3746                          struct rte_flow_error *error __rte_unused)
3747 {
3748         struct mlx5_flow_tbl_data_entry *tbl_data =
3749                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3750
3751         MLX5_ASSERT(tbl);
3752         MLX5_ASSERT(tbl_data->jump.action);
3753         dev_flow->handle->rix_jump = tbl_data->idx;
3754         dev_flow->dv.jump = &tbl_data->jump;
3755         return 0;
3756 }
3757
3758 int
3759 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3760                          struct mlx5_cache_entry *entry, void *cb_ctx)
3761 {
3762         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3763         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3764         struct mlx5_flow_dv_port_id_action_resource *res =
3765                         container_of(entry, typeof(*res), entry);
3766
3767         return ref->port_id != res->port_id;
3768 }
3769
/**
 * Allocate and create a port ID action (cache list create callback).
 *
 * @param list
 *   Pointer to the cache list; its ctx is the shared device context.
 * @param entry
 *   Unused existing entry.
 * @param cb_ctx
 *   Pointer to the context carrying the reference resource and the error
 *   structure to fill on failure.
 *
 * @return
 *   Pointer to the new cache entry on success, NULL otherwise and the
 *   error in the context is set.
 */
struct mlx5_cache_entry *
flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
                          struct mlx5_cache_entry *entry __rte_unused,
                          void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_port_id_action_resource *cache;
        uint32_t idx;
        int ret;

        /* Register new port id action resource. */
        cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
        if (!cache) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate port_id action cache memory");
                return NULL;
        }
        /* Copy the reference and create the action on the FDB domain. */
        *cache = *ref;
        ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
                                                        ref->port_id,
                                                        &cache->action);
        if (ret) {
                /* Release the pooled entry; the action was not created. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create action");
                return NULL;
        }
        cache->idx = idx;
        return &cache->entry;
}
3804
3805 /**
3806  * Find existing table port ID resource or create and register a new one.
3807  *
3808  * @param[in, out] dev
3809  *   Pointer to rte_eth_dev structure.
3810  * @param[in, out] resource
3811  *   Pointer to port ID action resource.
3812  * @parm[in, out] dev_flow
3813  *   Pointer to the dev_flow.
3814  * @param[out] error
3815  *   pointer to error structure.
3816  *
3817  * @return
3818  *   0 on success otherwise -errno and errno is set.
3819  */
3820 static int
3821 flow_dv_port_id_action_resource_register
3822                         (struct rte_eth_dev *dev,
3823                          struct mlx5_flow_dv_port_id_action_resource *resource,
3824                          struct mlx5_flow *dev_flow,
3825                          struct rte_flow_error *error)
3826 {
3827         struct mlx5_priv *priv = dev->data->dev_private;
3828         struct mlx5_cache_entry *entry;
3829         struct mlx5_flow_dv_port_id_action_resource *cache;
3830         struct mlx5_flow_cb_ctx ctx = {
3831                 .error = error,
3832                 .data = resource,
3833         };
3834
3835         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3836         if (!entry)
3837                 return -rte_errno;
3838         cache = container_of(entry, typeof(*cache), entry);
3839         dev_flow->dv.port_id_action = cache;
3840         dev_flow->handle->rix_port_id_action = cache->idx;
3841         return 0;
3842 }
3843
3844 int
3845 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3846                          struct mlx5_cache_entry *entry, void *cb_ctx)
3847 {
3848         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3849         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3850         struct mlx5_flow_dv_push_vlan_action_resource *res =
3851                         container_of(entry, typeof(*res), entry);
3852
3853         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3854 }
3855
/**
 * Allocate and create a push VLAN action (cache list create callback).
 *
 * @param list
 *   Pointer to the cache list; its ctx is the shared device context.
 * @param entry
 *   Unused existing entry.
 * @param cb_ctx
 *   Pointer to the context carrying the reference resource and the error
 *   structure to fill on failure.
 *
 * @return
 *   Pointer to the new cache entry on success, NULL otherwise and the
 *   error in the context is set.
 */
struct mlx5_cache_entry *
flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
                          struct mlx5_cache_entry *entry __rte_unused,
                          void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_push_vlan_action_resource *cache;
        struct mlx5dv_dr_domain *domain;
        uint32_t idx;
        int ret;

        /* Register new push_vlan action resource. */
        cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
        if (!cache) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate push_vlan action cache memory");
                return NULL;
        }
        *cache = *ref;
        /* Pick the DR domain matching the requested flow table type. */
        if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
                                                        &cache->action);
        if (ret) {
                /* Release the pooled entry; the action was not created. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create push vlan action");
                return NULL;
        }
        cache->idx = idx;
        return &cache->entry;
}
3896
3897 /**
3898  * Find existing push vlan resource or create and register a new one.
3899  *
3900  * @param [in, out] dev
3901  *   Pointer to rte_eth_dev structure.
3902  * @param[in, out] resource
3903  *   Pointer to port ID action resource.
3904  * @parm[in, out] dev_flow
3905  *   Pointer to the dev_flow.
3906  * @param[out] error
3907  *   pointer to error structure.
3908  *
3909  * @return
3910  *   0 on success otherwise -errno and errno is set.
3911  */
3912 static int
3913 flow_dv_push_vlan_action_resource_register
3914                        (struct rte_eth_dev *dev,
3915                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3916                         struct mlx5_flow *dev_flow,
3917                         struct rte_flow_error *error)
3918 {
3919         struct mlx5_priv *priv = dev->data->dev_private;
3920         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3921         struct mlx5_cache_entry *entry;
3922         struct mlx5_flow_cb_ctx ctx = {
3923                 .error = error,
3924                 .data = resource,
3925         };
3926
3927         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3928         if (!entry)
3929                 return -rte_errno;
3930         cache = container_of(entry, typeof(*cache), entry);
3931
3932         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3933         dev_flow->dv.push_vlan_res = cache;
3934         return 0;
3935 }
3936
3937 /**
3938  * Get the size of specific rte_flow_item_type hdr size
3939  *
3940  * @param[in] item_type
3941  *   Tested rte_flow_item_type.
3942  *
3943  * @return
3944  *   sizeof struct item_type, 0 if void or irrelevant.
3945  */
3946 static size_t
3947 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3948 {
3949         size_t retval;
3950
3951         switch (item_type) {
3952         case RTE_FLOW_ITEM_TYPE_ETH:
3953                 retval = sizeof(struct rte_ether_hdr);
3954                 break;
3955         case RTE_FLOW_ITEM_TYPE_VLAN:
3956                 retval = sizeof(struct rte_vlan_hdr);
3957                 break;
3958         case RTE_FLOW_ITEM_TYPE_IPV4:
3959                 retval = sizeof(struct rte_ipv4_hdr);
3960                 break;
3961         case RTE_FLOW_ITEM_TYPE_IPV6:
3962                 retval = sizeof(struct rte_ipv6_hdr);
3963                 break;
3964         case RTE_FLOW_ITEM_TYPE_UDP:
3965                 retval = sizeof(struct rte_udp_hdr);
3966                 break;
3967         case RTE_FLOW_ITEM_TYPE_TCP:
3968                 retval = sizeof(struct rte_tcp_hdr);
3969                 break;
3970         case RTE_FLOW_ITEM_TYPE_VXLAN:
3971         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3972                 retval = sizeof(struct rte_vxlan_hdr);
3973                 break;
3974         case RTE_FLOW_ITEM_TYPE_GRE:
3975         case RTE_FLOW_ITEM_TYPE_NVGRE:
3976                 retval = sizeof(struct rte_gre_hdr);
3977                 break;
3978         case RTE_FLOW_ITEM_TYPE_MPLS:
3979                 retval = sizeof(struct rte_mpls_hdr);
3980                 break;
3981         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3982         default:
3983                 retval = 0;
3984                 break;
3985         }
3986         return retval;
3987 }
3988
3989 #define MLX5_ENCAP_IPV4_VERSION         0x40
3990 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3991 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3992 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3993 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3994 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3995 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3996
3997 /**
3998  * Convert the encap action data from list of rte_flow_item to raw buffer
3999  *
4000  * @param[in] items
4001  *   Pointer to rte_flow_item objects list.
4002  * @param[out] buf
4003  *   Pointer to the output buffer.
4004  * @param[out] size
4005  *   Pointer to the output buffer size.
4006  * @param[out] error
4007  *   Pointer to the error structure.
4008  *
4009  * @return
4010  *   0 on success, a negative errno value otherwise and rte_errno is set.
4011  */
4012 static int
4013 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4014                            size_t *size, struct rte_flow_error *error)
4015 {
4016         struct rte_ether_hdr *eth = NULL;
4017         struct rte_vlan_hdr *vlan = NULL;
4018         struct rte_ipv4_hdr *ipv4 = NULL;
4019         struct rte_ipv6_hdr *ipv6 = NULL;
4020         struct rte_udp_hdr *udp = NULL;
4021         struct rte_vxlan_hdr *vxlan = NULL;
4022         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4023         struct rte_gre_hdr *gre = NULL;
4024         size_t len;
4025         size_t temp_size = 0;
4026
4027         if (!items)
4028                 return rte_flow_error_set(error, EINVAL,
4029                                           RTE_FLOW_ERROR_TYPE_ACTION,
4030                                           NULL, "invalid empty data");
4031         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4032                 len = flow_dv_get_item_hdr_len(items->type);
4033                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4034                         return rte_flow_error_set(error, EINVAL,
4035                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4036                                                   (void *)items->type,
4037                                                   "items total size is too big"
4038                                                   " for encap action");
4039                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4040                 switch (items->type) {
4041                 case RTE_FLOW_ITEM_TYPE_ETH:
4042                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4043                         break;
4044                 case RTE_FLOW_ITEM_TYPE_VLAN:
4045                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4046                         if (!eth)
4047                                 return rte_flow_error_set(error, EINVAL,
4048                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4049                                                 (void *)items->type,
4050                                                 "eth header not found");
4051                         if (!eth->ether_type)
4052                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4053                         break;
4054                 case RTE_FLOW_ITEM_TYPE_IPV4:
4055                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4056                         if (!vlan && !eth)
4057                                 return rte_flow_error_set(error, EINVAL,
4058                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4059                                                 (void *)items->type,
4060                                                 "neither eth nor vlan"
4061                                                 " header found");
4062                         if (vlan && !vlan->eth_proto)
4063                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4064                         else if (eth && !eth->ether_type)
4065                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4066                         if (!ipv4->version_ihl)
4067                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4068                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4069                         if (!ipv4->time_to_live)
4070                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4071                         break;
4072                 case RTE_FLOW_ITEM_TYPE_IPV6:
4073                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4074                         if (!vlan && !eth)
4075                                 return rte_flow_error_set(error, EINVAL,
4076                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4077                                                 (void *)items->type,
4078                                                 "neither eth nor vlan"
4079                                                 " header found");
4080                         if (vlan && !vlan->eth_proto)
4081                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4082                         else if (eth && !eth->ether_type)
4083                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4084                         if (!ipv6->vtc_flow)
4085                                 ipv6->vtc_flow =
4086                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4087                         if (!ipv6->hop_limits)
4088                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4089                         break;
4090                 case RTE_FLOW_ITEM_TYPE_UDP:
4091                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4092                         if (!ipv4 && !ipv6)
4093                                 return rte_flow_error_set(error, EINVAL,
4094                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4095                                                 (void *)items->type,
4096                                                 "ip header not found");
4097                         if (ipv4 && !ipv4->next_proto_id)
4098                                 ipv4->next_proto_id = IPPROTO_UDP;
4099                         else if (ipv6 && !ipv6->proto)
4100                                 ipv6->proto = IPPROTO_UDP;
4101                         break;
4102                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4103                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4104                         if (!udp)
4105                                 return rte_flow_error_set(error, EINVAL,
4106                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4107                                                 (void *)items->type,
4108                                                 "udp header not found");
4109                         if (!udp->dst_port)
4110                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4111                         if (!vxlan->vx_flags)
4112                                 vxlan->vx_flags =
4113                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4114                         break;
4115                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4116                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4117                         if (!udp)
4118                                 return rte_flow_error_set(error, EINVAL,
4119                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4120                                                 (void *)items->type,
4121                                                 "udp header not found");
4122                         if (!vxlan_gpe->proto)
4123                                 return rte_flow_error_set(error, EINVAL,
4124                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4125                                                 (void *)items->type,
4126                                                 "next protocol not found");
4127                         if (!udp->dst_port)
4128                                 udp->dst_port =
4129                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4130                         if (!vxlan_gpe->vx_flags)
4131                                 vxlan_gpe->vx_flags =
4132                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4133                         break;
4134                 case RTE_FLOW_ITEM_TYPE_GRE:
4135                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4136                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4137                         if (!gre->proto)
4138                                 return rte_flow_error_set(error, EINVAL,
4139                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4140                                                 (void *)items->type,
4141                                                 "next protocol not found");
4142                         if (!ipv4 && !ipv6)
4143                                 return rte_flow_error_set(error, EINVAL,
4144                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4145                                                 (void *)items->type,
4146                                                 "ip header not found");
4147                         if (ipv4 && !ipv4->next_proto_id)
4148                                 ipv4->next_proto_id = IPPROTO_GRE;
4149                         else if (ipv6 && !ipv6->proto)
4150                                 ipv6->proto = IPPROTO_GRE;
4151                         break;
4152                 case RTE_FLOW_ITEM_TYPE_VOID:
4153                         break;
4154                 default:
4155                         return rte_flow_error_set(error, EINVAL,
4156                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4157                                                   (void *)items->type,
4158                                                   "unsupported item type");
4159                         break;
4160                 }
4161                 temp_size += len;
4162         }
4163         *size = temp_size;
4164         return 0;
4165 }
4166
4167 static int
4168 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4169 {
4170         struct rte_ether_hdr *eth = NULL;
4171         struct rte_vlan_hdr *vlan = NULL;
4172         struct rte_ipv6_hdr *ipv6 = NULL;
4173         struct rte_udp_hdr *udp = NULL;
4174         char *next_hdr;
4175         uint16_t proto;
4176
4177         eth = (struct rte_ether_hdr *)data;
4178         next_hdr = (char *)(eth + 1);
4179         proto = RTE_BE16(eth->ether_type);
4180
4181         /* VLAN skipping */
4182         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4183                 vlan = (struct rte_vlan_hdr *)next_hdr;
4184                 proto = RTE_BE16(vlan->eth_proto);
4185                 next_hdr += sizeof(struct rte_vlan_hdr);
4186         }
4187
4188         /* HW calculates IPv4 csum. no need to proceed */
4189         if (proto == RTE_ETHER_TYPE_IPV4)
4190                 return 0;
4191
4192         /* non IPv4/IPv6 header. not supported */
4193         if (proto != RTE_ETHER_TYPE_IPV6) {
4194                 return rte_flow_error_set(error, ENOTSUP,
4195                                           RTE_FLOW_ERROR_TYPE_ACTION,
4196                                           NULL, "Cannot offload non IPv4/IPv6");
4197         }
4198
4199         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4200
4201         /* ignore non UDP */
4202         if (ipv6->proto != IPPROTO_UDP)
4203                 return 0;
4204
4205         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4206         udp->dgram_cksum = 0;
4207
4208         return 0;
4209 }
4210
4211 /**
4212  * Convert L2 encap action to DV specification.
4213  *
4214  * @param[in] dev
4215  *   Pointer to rte_eth_dev structure.
4216  * @param[in] action
4217  *   Pointer to action structure.
4218  * @param[in, out] dev_flow
4219  *   Pointer to the mlx5_flow.
4220  * @param[in] transfer
4221  *   Mark if the flow is E-Switch flow.
4222  * @param[out] error
4223  *   Pointer to the error structure.
4224  *
4225  * @return
4226  *   0 on success, a negative errno value otherwise and rte_errno is set.
4227  */
4228 static int
4229 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4230                                const struct rte_flow_action *action,
4231                                struct mlx5_flow *dev_flow,
4232                                uint8_t transfer,
4233                                struct rte_flow_error *error)
4234 {
4235         const struct rte_flow_item *encap_data;
4236         const struct rte_flow_action_raw_encap *raw_encap_data;
4237         struct mlx5_flow_dv_encap_decap_resource res = {
4238                 .reformat_type =
4239                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4240                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4241                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4242         };
4243
4244         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4245                 raw_encap_data =
4246                         (const struct rte_flow_action_raw_encap *)action->conf;
4247                 res.size = raw_encap_data->size;
4248                 memcpy(res.buf, raw_encap_data->data, res.size);
4249         } else {
4250                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4251                         encap_data =
4252                                 ((const struct rte_flow_action_vxlan_encap *)
4253                                                 action->conf)->definition;
4254                 else
4255                         encap_data =
4256                                 ((const struct rte_flow_action_nvgre_encap *)
4257                                                 action->conf)->definition;
4258                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4259                                                &res.size, error))
4260                         return -rte_errno;
4261         }
4262         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4263                 return -rte_errno;
4264         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4265                 return rte_flow_error_set(error, EINVAL,
4266                                           RTE_FLOW_ERROR_TYPE_ACTION,
4267                                           NULL, "can't create L2 encap action");
4268         return 0;
4269 }
4270
4271 /**
4272  * Convert L2 decap action to DV specification.
4273  *
4274  * @param[in] dev
4275  *   Pointer to rte_eth_dev structure.
4276  * @param[in, out] dev_flow
4277  *   Pointer to the mlx5_flow.
4278  * @param[in] transfer
4279  *   Mark if the flow is E-Switch flow.
4280  * @param[out] error
4281  *   Pointer to the error structure.
4282  *
4283  * @return
4284  *   0 on success, a negative errno value otherwise and rte_errno is set.
4285  */
4286 static int
4287 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4288                                struct mlx5_flow *dev_flow,
4289                                uint8_t transfer,
4290                                struct rte_flow_error *error)
4291 {
4292         struct mlx5_flow_dv_encap_decap_resource res = {
4293                 .size = 0,
4294                 .reformat_type =
4295                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4296                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4297                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4298         };
4299
4300         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4301                 return rte_flow_error_set(error, EINVAL,
4302                                           RTE_FLOW_ERROR_TYPE_ACTION,
4303                                           NULL, "can't create L2 decap action");
4304         return 0;
4305 }
4306
4307 /**
4308  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4309  *
4310  * @param[in] dev
4311  *   Pointer to rte_eth_dev structure.
4312  * @param[in] action
4313  *   Pointer to action structure.
4314  * @param[in, out] dev_flow
4315  *   Pointer to the mlx5_flow.
4316  * @param[in] attr
4317  *   Pointer to the flow attributes.
4318  * @param[out] error
4319  *   Pointer to the error structure.
4320  *
4321  * @return
4322  *   0 on success, a negative errno value otherwise and rte_errno is set.
4323  */
4324 static int
4325 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4326                                 const struct rte_flow_action *action,
4327                                 struct mlx5_flow *dev_flow,
4328                                 const struct rte_flow_attr *attr,
4329                                 struct rte_flow_error *error)
4330 {
4331         const struct rte_flow_action_raw_encap *encap_data;
4332         struct mlx5_flow_dv_encap_decap_resource res;
4333
4334         memset(&res, 0, sizeof(res));
4335         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4336         res.size = encap_data->size;
4337         memcpy(res.buf, encap_data->data, res.size);
4338         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4339                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4340                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4341         if (attr->transfer)
4342                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4343         else
4344                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4345                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4346         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4347                 return rte_flow_error_set(error, EINVAL,
4348                                           RTE_FLOW_ERROR_TYPE_ACTION,
4349                                           NULL, "can't create encap action");
4350         return 0;
4351 }
4352
4353 /**
4354  * Create action push VLAN.
4355  *
4356  * @param[in] dev
4357  *   Pointer to rte_eth_dev structure.
4358  * @param[in] attr
4359  *   Pointer to the flow attributes.
4360  * @param[in] vlan
4361  *   Pointer to the vlan to push to the Ethernet header.
4362  * @param[in, out] dev_flow
4363  *   Pointer to the mlx5_flow.
4364  * @param[out] error
4365  *   Pointer to the error structure.
4366  *
4367  * @return
4368  *   0 on success, a negative errno value otherwise and rte_errno is set.
4369  */
4370 static int
4371 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4372                                 const struct rte_flow_attr *attr,
4373                                 const struct rte_vlan_hdr *vlan,
4374                                 struct mlx5_flow *dev_flow,
4375                                 struct rte_flow_error *error)
4376 {
4377         struct mlx5_flow_dv_push_vlan_action_resource res;
4378
4379         memset(&res, 0, sizeof(res));
4380         res.vlan_tag =
4381                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4382                                  vlan->vlan_tci);
4383         if (attr->transfer)
4384                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4385         else
4386                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4387                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4388         return flow_dv_push_vlan_action_resource_register
4389                                             (dev, &res, dev_flow, error);
4390 }
4391
4392 /**
4393  * Validate the modify-header actions.
4394  *
4395  * @param[in] action_flags
4396  *   Holds the actions detected until now.
4397  * @param[in] action
4398  *   Pointer to the modify action.
4399  * @param[out] error
4400  *   Pointer to error structure.
4401  *
4402  * @return
4403  *   0 on success, a negative errno value otherwise and rte_errno is set.
4404  */
4405 static int
4406 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4407                                    const struct rte_flow_action *action,
4408                                    struct rte_flow_error *error)
4409 {
4410         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4411                 return rte_flow_error_set(error, EINVAL,
4412                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4413                                           NULL, "action configuration not set");
4414         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4415                 return rte_flow_error_set(error, EINVAL,
4416                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4417                                           "can't have encap action before"
4418                                           " modify action");
4419         return 0;
4420 }
4421
4422 /**
4423  * Validate the modify-header MAC address actions.
4424  *
4425  * @param[in] action_flags
4426  *   Holds the actions detected until now.
4427  * @param[in] action
4428  *   Pointer to the modify action.
4429  * @param[in] item_flags
4430  *   Holds the items detected.
4431  * @param[out] error
4432  *   Pointer to error structure.
4433  *
4434  * @return
4435  *   0 on success, a negative errno value otherwise and rte_errno is set.
4436  */
4437 static int
4438 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4439                                    const struct rte_flow_action *action,
4440                                    const uint64_t item_flags,
4441                                    struct rte_flow_error *error)
4442 {
4443         int ret = 0;
4444
4445         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4446         if (!ret) {
4447                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4448                         return rte_flow_error_set(error, EINVAL,
4449                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4450                                                   NULL,
4451                                                   "no L2 item in pattern");
4452         }
4453         return ret;
4454 }
4455
4456 /**
4457  * Validate the modify-header IPv4 address actions.
4458  *
4459  * @param[in] action_flags
4460  *   Holds the actions detected until now.
4461  * @param[in] action
4462  *   Pointer to the modify action.
4463  * @param[in] item_flags
4464  *   Holds the items detected.
4465  * @param[out] error
4466  *   Pointer to error structure.
4467  *
4468  * @return
4469  *   0 on success, a negative errno value otherwise and rte_errno is set.
4470  */
4471 static int
4472 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4473                                     const struct rte_flow_action *action,
4474                                     const uint64_t item_flags,
4475                                     struct rte_flow_error *error)
4476 {
4477         int ret = 0;
4478         uint64_t layer;
4479
4480         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4481         if (!ret) {
4482                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4483                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4484                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4485                 if (!(item_flags & layer))
4486                         return rte_flow_error_set(error, EINVAL,
4487                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4488                                                   NULL,
4489                                                   "no ipv4 item in pattern");
4490         }
4491         return ret;
4492 }
4493
4494 /**
4495  * Validate the modify-header IPv6 address actions.
4496  *
4497  * @param[in] action_flags
4498  *   Holds the actions detected until now.
4499  * @param[in] action
4500  *   Pointer to the modify action.
4501  * @param[in] item_flags
4502  *   Holds the items detected.
4503  * @param[out] error
4504  *   Pointer to error structure.
4505  *
4506  * @return
4507  *   0 on success, a negative errno value otherwise and rte_errno is set.
4508  */
4509 static int
4510 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4511                                     const struct rte_flow_action *action,
4512                                     const uint64_t item_flags,
4513                                     struct rte_flow_error *error)
4514 {
4515         int ret = 0;
4516         uint64_t layer;
4517
4518         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4519         if (!ret) {
4520                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4521                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4522                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4523                 if (!(item_flags & layer))
4524                         return rte_flow_error_set(error, EINVAL,
4525                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4526                                                   NULL,
4527                                                   "no ipv6 item in pattern");
4528         }
4529         return ret;
4530 }
4531
4532 /**
4533  * Validate the modify-header TP actions.
4534  *
4535  * @param[in] action_flags
4536  *   Holds the actions detected until now.
4537  * @param[in] action
4538  *   Pointer to the modify action.
4539  * @param[in] item_flags
4540  *   Holds the items detected.
4541  * @param[out] error
4542  *   Pointer to error structure.
4543  *
4544  * @return
4545  *   0 on success, a negative errno value otherwise and rte_errno is set.
4546  */
4547 static int
4548 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4549                                   const struct rte_flow_action *action,
4550                                   const uint64_t item_flags,
4551                                   struct rte_flow_error *error)
4552 {
4553         int ret = 0;
4554         uint64_t layer;
4555
4556         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4557         if (!ret) {
4558                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4559                                  MLX5_FLOW_LAYER_INNER_L4 :
4560                                  MLX5_FLOW_LAYER_OUTER_L4;
4561                 if (!(item_flags & layer))
4562                         return rte_flow_error_set(error, EINVAL,
4563                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4564                                                   NULL, "no transport layer "
4565                                                   "in pattern");
4566         }
4567         return ret;
4568 }
4569
4570 /**
4571  * Validate the modify-header actions of increment/decrement
4572  * TCP Sequence-number.
4573  *
4574  * @param[in] action_flags
4575  *   Holds the actions detected until now.
4576  * @param[in] action
4577  *   Pointer to the modify action.
4578  * @param[in] item_flags
4579  *   Holds the items detected.
4580  * @param[out] error
4581  *   Pointer to error structure.
4582  *
4583  * @return
4584  *   0 on success, a negative errno value otherwise and rte_errno is set.
4585  */
4586 static int
4587 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4588                                        const struct rte_flow_action *action,
4589                                        const uint64_t item_flags,
4590                                        struct rte_flow_error *error)
4591 {
4592         int ret = 0;
4593         uint64_t layer;
4594
4595         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4596         if (!ret) {
4597                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4598                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4599                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4600                 if (!(item_flags & layer))
4601                         return rte_flow_error_set(error, EINVAL,
4602                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4603                                                   NULL, "no TCP item in"
4604                                                   " pattern");
4605                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4606                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4607                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4608                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4609                         return rte_flow_error_set(error, EINVAL,
4610                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4611                                                   NULL,
4612                                                   "cannot decrease and increase"
4613                                                   " TCP sequence number"
4614                                                   " at the same time");
4615         }
4616         return ret;
4617 }
4618
4619 /**
4620  * Validate the modify-header actions of increment/decrement
4621  * TCP Acknowledgment number.
4622  *
4623  * @param[in] action_flags
4624  *   Holds the actions detected until now.
4625  * @param[in] action
4626  *   Pointer to the modify action.
4627  * @param[in] item_flags
4628  *   Holds the items detected.
4629  * @param[out] error
4630  *   Pointer to error structure.
4631  *
4632  * @return
4633  *   0 on success, a negative errno value otherwise and rte_errno is set.
4634  */
4635 static int
4636 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4637                                        const struct rte_flow_action *action,
4638                                        const uint64_t item_flags,
4639                                        struct rte_flow_error *error)
4640 {
4641         int ret = 0;
4642         uint64_t layer;
4643
4644         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4645         if (!ret) {
4646                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4647                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4648                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4649                 if (!(item_flags & layer))
4650                         return rte_flow_error_set(error, EINVAL,
4651                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4652                                                   NULL, "no TCP item in"
4653                                                   " pattern");
4654                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4655                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4656                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4657                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4658                         return rte_flow_error_set(error, EINVAL,
4659                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4660                                                   NULL,
4661                                                   "cannot decrease and increase"
4662                                                   " TCP acknowledgment number"
4663                                                   " at the same time");
4664         }
4665         return ret;
4666 }
4667
4668 /**
4669  * Validate the modify-header TTL actions.
4670  *
4671  * @param[in] action_flags
4672  *   Holds the actions detected until now.
4673  * @param[in] action
4674  *   Pointer to the modify action.
4675  * @param[in] item_flags
4676  *   Holds the items detected.
4677  * @param[out] error
4678  *   Pointer to error structure.
4679  *
4680  * @return
4681  *   0 on success, a negative errno value otherwise and rte_errno is set.
4682  */
4683 static int
4684 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4685                                    const struct rte_flow_action *action,
4686                                    const uint64_t item_flags,
4687                                    struct rte_flow_error *error)
4688 {
4689         int ret = 0;
4690         uint64_t layer;
4691
4692         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4693         if (!ret) {
4694                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4695                                  MLX5_FLOW_LAYER_INNER_L3 :
4696                                  MLX5_FLOW_LAYER_OUTER_L3;
4697                 if (!(item_flags & layer))
4698                         return rte_flow_error_set(error, EINVAL,
4699                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4700                                                   NULL,
4701                                                   "no IP protocol in pattern");
4702         }
4703         return ret;
4704 }
4705
4706 /**
4707  * Validate the generic modify field actions.
4708  * @param[in] dev
4709  *   Pointer to the rte_eth_dev structure.
4710  * @param[in] action_flags
4711  *   Holds the actions detected until now.
4712  * @param[in] action
4713  *   Pointer to the modify action.
4714  * @param[in] attr
4715  *   Pointer to the flow attributes.
4716  * @param[out] error
4717  *   Pointer to error structure.
4718  *
4719  * @return
4720  *   Number of header fields to modify (0 or more) on success,
4721  *   a negative errno value otherwise and rte_errno is set.
4722  */
4723 static int
4724 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4725                                    const uint64_t action_flags,
4726                                    const struct rte_flow_action *action,
4727                                    const struct rte_flow_attr *attr,
4728                                    struct rte_flow_error *error)
4729 {
4730         int ret = 0;
4731         struct mlx5_priv *priv = dev->data->dev_private;
4732         struct mlx5_dev_config *config = &priv->config;
4733         const struct rte_flow_action_modify_field *action_modify_field =
4734                 action->conf;
4735         uint32_t dst_width = mlx5_flow_item_field_width(config,
4736                                 action_modify_field->dst.field);
4737         uint32_t src_width = mlx5_flow_item_field_width(config,
4738                                 action_modify_field->src.field);
4739
4740         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4741         if (ret)
4742                 return ret;
4743
4744         if (action_modify_field->width == 0)
4745                 return rte_flow_error_set(error, EINVAL,
4746                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4747                                 "no bits are requested to be modified");
4748         else if (action_modify_field->width > dst_width ||
4749                  action_modify_field->width > src_width)
4750                 return rte_flow_error_set(error, EINVAL,
4751                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4752                                 "cannot modify more bits than"
4753                                 " the width of a field");
4754         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4755             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4756                 if ((action_modify_field->dst.offset +
4757                      action_modify_field->width > dst_width) ||
4758                     (action_modify_field->dst.offset % 32))
4759                         return rte_flow_error_set(error, EINVAL,
4760                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4761                                         "destination offset is too big"
4762                                         " or not aligned to 4 bytes");
4763                 if (action_modify_field->dst.level &&
4764                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4765                         return rte_flow_error_set(error, ENOTSUP,
4766                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4767                                         "inner header fields modification"
4768                                         " is not supported");
4769         }
4770         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4771             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4772                 if (!attr->transfer && !attr->group)
4773                         return rte_flow_error_set(error, ENOTSUP,
4774                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4775                                         "modify field action is not"
4776                                         " supported for group 0");
4777                 if ((action_modify_field->src.offset +
4778                      action_modify_field->width > src_width) ||
4779                     (action_modify_field->src.offset % 32))
4780                         return rte_flow_error_set(error, EINVAL,
4781                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4782                                         "source offset is too big"
4783                                         " or not aligned to 4 bytes");
4784                 if (action_modify_field->src.level &&
4785                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4786                         return rte_flow_error_set(error, ENOTSUP,
4787                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4788                                         "inner header fields modification"
4789                                         " is not supported");
4790         }
4791         if ((action_modify_field->dst.field ==
4792              action_modify_field->src.field) &&
4793             (action_modify_field->dst.level ==
4794              action_modify_field->src.level))
4795                 return rte_flow_error_set(error, EINVAL,
4796                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4797                                 "source and destination fields"
4798                                 " cannot be the same");
4799         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4800             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4801                 return rte_flow_error_set(error, EINVAL,
4802                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4803                                 "immediate value or a pointer to it"
4804                                 " cannot be used as a destination");
4805         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4806             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4807                 return rte_flow_error_set(error, ENOTSUP,
4808                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4809                                 "modifications of an arbitrary"
4810                                 " place in a packet is not supported");
4811         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4812             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4813                 return rte_flow_error_set(error, ENOTSUP,
4814                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4815                                 "modifications of the 802.1Q Tag"
4816                                 " Identifier is not supported");
4817         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4818             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4819                 return rte_flow_error_set(error, ENOTSUP,
4820                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4821                                 "modifications of the VXLAN Network"
4822                                 " Identifier is not supported");
4823         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4824             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4825                 return rte_flow_error_set(error, ENOTSUP,
4826                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4827                                 "modifications of the GENEVE Network"
4828                                 " Identifier is not supported");
4829         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4830             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4831             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4832             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4833                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4834                     !mlx5_flow_ext_mreg_supported(dev))
4835                         return rte_flow_error_set(error, ENOTSUP,
4836                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4837                                         "cannot modify mark or metadata without"
4838                                         " extended metadata register support");
4839         }
4840         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4841                 return rte_flow_error_set(error, ENOTSUP,
4842                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4843                                 "add and sub operations"
4844                                 " are not supported");
4845         return (action_modify_field->width / 32) +
4846                !!(action_modify_field->width % 32);
4847 }
4848
4849 /**
4850  * Validate jump action.
4851  *
4852  * @param[in] action
4853  *   Pointer to the jump action.
4854  * @param[in] action_flags
4855  *   Holds the actions detected until now.
4856  * @param[in] attributes
4857  *   Pointer to flow attributes
4858  * @param[in] external
4859  *   Action belongs to flow rule created by request external to PMD.
4860  * @param[out] error
4861  *   Pointer to error structure.
4862  *
4863  * @return
4864  *   0 on success, a negative errno value otherwise and rte_errno is set.
4865  */
4866 static int
4867 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4868                              const struct mlx5_flow_tunnel *tunnel,
4869                              const struct rte_flow_action *action,
4870                              uint64_t action_flags,
4871                              const struct rte_flow_attr *attributes,
4872                              bool external, struct rte_flow_error *error)
4873 {
4874         uint32_t target_group, table;
4875         int ret = 0;
4876         struct flow_grp_info grp_info = {
4877                 .external = !!external,
4878                 .transfer = !!attributes->transfer,
4879                 .fdb_def_rule = 1,
4880                 .std_tbl_fix = 0
4881         };
4882         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4883                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4884                 return rte_flow_error_set(error, EINVAL,
4885                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4886                                           "can't have 2 fate actions in"
4887                                           " same flow");
4888         if (!action->conf)
4889                 return rte_flow_error_set(error, EINVAL,
4890                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4891                                           NULL, "action configuration not set");
4892         target_group =
4893                 ((const struct rte_flow_action_jump *)action->conf)->group;
4894         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4895                                        &grp_info, error);
4896         if (ret)
4897                 return ret;
4898         if (attributes->group == target_group &&
4899             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4900                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4901                 return rte_flow_error_set(error, EINVAL,
4902                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4903                                           "target group must be other than"
4904                                           " the current flow group");
4905         return 0;
4906 }
4907
4908 /*
4909  * Validate the port_id action.
4910  *
4911  * @param[in] dev
4912  *   Pointer to rte_eth_dev structure.
4913  * @param[in] action_flags
4914  *   Bit-fields that holds the actions detected until now.
4915  * @param[in] action
4916  *   Port_id RTE action structure.
4917  * @param[in] attr
4918  *   Attributes of flow that includes this action.
4919  * @param[out] error
4920  *   Pointer to error structure.
4921  *
4922  * @return
4923  *   0 on success, a negative errno value otherwise and rte_errno is set.
4924  */
4925 static int
4926 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4927                                 uint64_t action_flags,
4928                                 const struct rte_flow_action *action,
4929                                 const struct rte_flow_attr *attr,
4930                                 struct rte_flow_error *error)
4931 {
4932         const struct rte_flow_action_port_id *port_id;
4933         struct mlx5_priv *act_priv;
4934         struct mlx5_priv *dev_priv;
4935         uint16_t port;
4936
4937         if (!attr->transfer)
4938                 return rte_flow_error_set(error, ENOTSUP,
4939                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4940                                           NULL,
4941                                           "port id action is valid in transfer"
4942                                           " mode only");
4943         if (!action || !action->conf)
4944                 return rte_flow_error_set(error, ENOTSUP,
4945                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4946                                           NULL,
4947                                           "port id action parameters must be"
4948                                           " specified");
4949         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4950                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4951                 return rte_flow_error_set(error, EINVAL,
4952                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4953                                           "can have only one fate actions in"
4954                                           " a flow");
4955         dev_priv = mlx5_dev_to_eswitch_info(dev);
4956         if (!dev_priv)
4957                 return rte_flow_error_set(error, rte_errno,
4958                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4959                                           NULL,
4960                                           "failed to obtain E-Switch info");
4961         port_id = action->conf;
4962         port = port_id->original ? dev->data->port_id : port_id->id;
4963         act_priv = mlx5_port_to_eswitch_info(port, false);
4964         if (!act_priv)
4965                 return rte_flow_error_set
4966                                 (error, rte_errno,
4967                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4968                                  "failed to obtain E-Switch port id for port");
4969         if (act_priv->domain_id != dev_priv->domain_id)
4970                 return rte_flow_error_set
4971                                 (error, EINVAL,
4972                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4973                                  "port does not belong to"
4974                                  " E-Switch being configured");
4975         return 0;
4976 }
4977
4978 /**
4979  * Get the maximum number of modify header actions.
4980  *
4981  * @param dev
4982  *   Pointer to rte_eth_dev structure.
4983  * @param flags
4984  *   Flags bits to check if root level.
4985  *
4986  * @return
4987  *   Max number of modify header actions device can support.
4988  */
4989 static inline unsigned int
4990 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4991                               uint64_t flags)
4992 {
4993         /*
4994          * There's no way to directly query the max capacity from FW.
4995          * The maximal value on root table should be assumed to be supported.
4996          */
4997         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4998                 return MLX5_MAX_MODIFY_NUM;
4999         else
5000                 return MLX5_ROOT_TBL_MODIFY_NUM;
5001 }
5002
/**
 * Validate the meter action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Pointer to the meter action.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] def_policy
 *   Set to true when the meter uses the default policy, false when it
 *   references a user-created policy.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
				uint64_t action_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				bool *def_policy,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_meter *am = action->conf;
	struct mlx5_flow_meter_info *fm;
	struct mlx5_flow_meter_policy *mtr_policy;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;

	if (!am)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter action conf is NULL");

	/* Only a single meter action is allowed per flow. */
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter chaining not support");
	if (action_flags & MLX5_FLOW_ACTION_JUMP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter with jump not support");
	if (!priv->mtr_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "meter action not supported");
	fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
	if (!fm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Meter not found");
	/* aso meter can always be shared by different domains */
	if (fm->ref_cnt && !priv->sh->meter_aso_en &&
	    !(fm->transfer == attr->transfer ||
	      (!fm->ingress && !attr->ingress && attr->egress) ||
	      (!fm->egress && !attr->egress && attr->ingress)))
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Flow attributes domain are either invalid "
			"or have a domain conflict with current "
			"meter attributes");
	if (fm->def_policy) {
		/*
		 * Default-policy meter: the matching per-domain default
		 * policy table must have been created.
		 */
		if (!((attr->transfer &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
			(attr->egress &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
			(attr->ingress &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes domain "
					  "have a conflict with current "
					  "meter domain attributes");
		*def_policy = true;
	} else {
		/* User policy: it must cover the flow's domain. */
		mtr_policy = mlx5_flow_meter_policy_find(dev,
						fm->policy_id, NULL);
		if (!mtr_policy)
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Invalid policy id for meter ");
		if (!((attr->transfer && mtr_policy->transfer) ||
			(attr->egress && mtr_policy->egress) ||
			(attr->ingress && mtr_policy->ingress)))
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes domain "
					  "have a conflict with current "
					  "meter domain attributes");
		*def_policy = false;
	}
	return 0;
}
5099
5100 /**
5101  * Validate the age action.
5102  *
5103  * @param[in] action_flags
5104  *   Holds the actions detected until now.
5105  * @param[in] action
5106  *   Pointer to the age action.
5107  * @param[in] dev
5108  *   Pointer to the Ethernet device structure.
5109  * @param[out] error
5110  *   Pointer to error structure.
5111  *
5112  * @return
5113  *   0 on success, a negative errno value otherwise and rte_errno is set.
5114  */
5115 static int
5116 flow_dv_validate_action_age(uint64_t action_flags,
5117                             const struct rte_flow_action *action,
5118                             struct rte_eth_dev *dev,
5119                             struct rte_flow_error *error)
5120 {
5121         struct mlx5_priv *priv = dev->data->dev_private;
5122         const struct rte_flow_action_age *age = action->conf;
5123
5124         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5125             !priv->sh->aso_age_mng))
5126                 return rte_flow_error_set(error, ENOTSUP,
5127                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5128                                           NULL,
5129                                           "age action not supported");
5130         if (!(action->conf))
5131                 return rte_flow_error_set(error, EINVAL,
5132                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5133                                           "configuration cannot be null");
5134         if (!(age->timeout))
5135                 return rte_flow_error_set(error, EINVAL,
5136                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5137                                           "invalid timeout value 0");
5138         if (action_flags & MLX5_FLOW_ACTION_AGE)
5139                 return rte_flow_error_set(error, EINVAL,
5140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5141                                           "duplicate age actions set");
5142         return 0;
5143 }
5144
5145 /**
5146  * Validate the modify-header IPv4 DSCP actions.
5147  *
5148  * @param[in] action_flags
5149  *   Holds the actions detected until now.
5150  * @param[in] action
5151  *   Pointer to the modify action.
5152  * @param[in] item_flags
5153  *   Holds the items detected.
5154  * @param[out] error
5155  *   Pointer to error structure.
5156  *
5157  * @return
5158  *   0 on success, a negative errno value otherwise and rte_errno is set.
5159  */
5160 static int
5161 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5162                                          const struct rte_flow_action *action,
5163                                          const uint64_t item_flags,
5164                                          struct rte_flow_error *error)
5165 {
5166         int ret = 0;
5167
5168         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5169         if (!ret) {
5170                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5171                         return rte_flow_error_set(error, EINVAL,
5172                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5173                                                   NULL,
5174                                                   "no ipv4 item in pattern");
5175         }
5176         return ret;
5177 }
5178
5179 /**
5180  * Validate the modify-header IPv6 DSCP actions.
5181  *
5182  * @param[in] action_flags
5183  *   Holds the actions detected until now.
5184  * @param[in] action
5185  *   Pointer to the modify action.
5186  * @param[in] item_flags
5187  *   Holds the items detected.
5188  * @param[out] error
5189  *   Pointer to error structure.
5190  *
5191  * @return
5192  *   0 on success, a negative errno value otherwise and rte_errno is set.
5193  */
5194 static int
5195 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5196                                          const struct rte_flow_action *action,
5197                                          const uint64_t item_flags,
5198                                          struct rte_flow_error *error)
5199 {
5200         int ret = 0;
5201
5202         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5203         if (!ret) {
5204                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5205                         return rte_flow_error_set(error, EINVAL,
5206                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5207                                                   NULL,
5208                                                   "no ipv6 item in pattern");
5209         }
5210         return ret;
5211 }
5212
5213 /**
5214  * Match modify-header resource.
5215  *
5216  * @param list
5217  *   Pointer to the hash list.
5218  * @param entry
5219  *   Pointer to exist resource entry object.
5220  * @param key
5221  *   Key of the new entry.
5222  * @param ctx
5223  *   Pointer to new modify-header resource.
5224  *
5225  * @return
5226  *   0 on matching, non-zero otherwise.
5227  */
5228 int
5229 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5230                         struct mlx5_hlist_entry *entry,
5231                         uint64_t key __rte_unused, void *cb_ctx)
5232 {
5233         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5234         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5235         struct mlx5_flow_dv_modify_hdr_resource *resource =
5236                         container_of(entry, typeof(*resource), entry);
5237         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5238
5239         key_len += ref->actions_num * sizeof(ref->actions[0]);
5240         return ref->actions_num != resource->actions_num ||
5241                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5242 }
5243
5244 struct mlx5_hlist_entry *
5245 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5246                          void *cb_ctx)
5247 {
5248         struct mlx5_dev_ctx_shared *sh = list->ctx;
5249         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5250         struct mlx5dv_dr_domain *ns;
5251         struct mlx5_flow_dv_modify_hdr_resource *entry;
5252         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5253         int ret;
5254         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5255         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5256
5257         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5258                             SOCKET_ID_ANY);
5259         if (!entry) {
5260                 rte_flow_error_set(ctx->error, ENOMEM,
5261                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5262                                    "cannot allocate resource memory");
5263                 return NULL;
5264         }
5265         rte_memcpy(&entry->ft_type,
5266                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5267                    key_len + data_len);
5268         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5269                 ns = sh->fdb_domain;
5270         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5271                 ns = sh->tx_domain;
5272         else
5273                 ns = sh->rx_domain;
5274         ret = mlx5_flow_os_create_flow_action_modify_header
5275                                         (sh->ctx, ns, entry,
5276                                          data_len, &entry->action);
5277         if (ret) {
5278                 mlx5_free(entry);
5279                 rte_flow_error_set(ctx->error, ENOMEM,
5280                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5281                                    NULL, "cannot create modification action");
5282                 return NULL;
5283         }
5284         return &entry->entry;
5285 }
5286
5287 /**
5288  * Validate the sample action.
5289  *
5290  * @param[in, out] action_flags
5291  *   Holds the actions detected until now.
5292  * @param[in] action
5293  *   Pointer to the sample action.
5294  * @param[in] dev
5295  *   Pointer to the Ethernet device structure.
5296  * @param[in] attr
5297  *   Attributes of flow that includes this action.
5298  * @param[in] item_flags
5299  *   Holds the items detected.
5300  * @param[in] rss
5301  *   Pointer to the RSS action.
5302  * @param[out] sample_rss
5303  *   Pointer to the RSS action in sample action list.
5304  * @param[out] count
5305  *   Pointer to the COUNT action in sample action list.
5306  * @param[out] fdb_mirror_limit
5307  *   Pointer to the FDB mirror limitation flag.
5308  * @param[out] error
5309  *   Pointer to error structure.
5310  *
5311  * @return
5312  *   0 on success, a negative errno value otherwise and rte_errno is set.
5313  */
5314 static int
5315 flow_dv_validate_action_sample(uint64_t *action_flags,
5316                                const struct rte_flow_action *action,
5317                                struct rte_eth_dev *dev,
5318                                const struct rte_flow_attr *attr,
5319                                uint64_t item_flags,
5320                                const struct rte_flow_action_rss *rss,
5321                                const struct rte_flow_action_rss **sample_rss,
5322                                const struct rte_flow_action_count **count,
5323                                int *fdb_mirror_limit,
5324                                struct rte_flow_error *error)
5325 {
5326         struct mlx5_priv *priv = dev->data->dev_private;
5327         struct mlx5_dev_config *dev_conf = &priv->config;
5328         const struct rte_flow_action_sample *sample = action->conf;
5329         const struct rte_flow_action *act;
5330         uint64_t sub_action_flags = 0;
5331         uint16_t queue_index = 0xFFFF;
5332         int actions_n = 0;
5333         int ret;
5334
5335         if (!sample)
5336                 return rte_flow_error_set(error, EINVAL,
5337                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5338                                           "configuration cannot be NULL");
5339         if (sample->ratio == 0)
5340                 return rte_flow_error_set(error, EINVAL,
5341                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5342                                           "ratio value starts from 1");
5343         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5344                 return rte_flow_error_set(error, ENOTSUP,
5345                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5346                                           NULL,
5347                                           "sample action not supported");
5348         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5349                 return rte_flow_error_set(error, EINVAL,
5350                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5351                                           "Multiple sample actions not "
5352                                           "supported");
5353         if (*action_flags & MLX5_FLOW_ACTION_METER)
5354                 return rte_flow_error_set(error, EINVAL,
5355                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5356                                           "wrong action order, meter should "
5357                                           "be after sample action");
5358         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5359                 return rte_flow_error_set(error, EINVAL,
5360                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5361                                           "wrong action order, jump should "
5362                                           "be after sample action");
5363         act = sample->actions;
5364         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5365                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5366                         return rte_flow_error_set(error, ENOTSUP,
5367                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5368                                                   act, "too many actions");
5369                 switch (act->type) {
5370                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5371                         ret = mlx5_flow_validate_action_queue(act,
5372                                                               sub_action_flags,
5373                                                               dev,
5374                                                               attr, error);
5375                         if (ret < 0)
5376                                 return ret;
5377                         queue_index = ((const struct rte_flow_action_queue *)
5378                                                         (act->conf))->index;
5379                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5380                         ++actions_n;
5381                         break;
5382                 case RTE_FLOW_ACTION_TYPE_RSS:
5383                         *sample_rss = act->conf;
5384                         ret = mlx5_flow_validate_action_rss(act,
5385                                                             sub_action_flags,
5386                                                             dev, attr,
5387                                                             item_flags,
5388                                                             error);
5389                         if (ret < 0)
5390                                 return ret;
5391                         if (rss && *sample_rss &&
5392                             ((*sample_rss)->level != rss->level ||
5393                             (*sample_rss)->types != rss->types))
5394                                 return rte_flow_error_set(error, ENOTSUP,
5395                                         RTE_FLOW_ERROR_TYPE_ACTION,
5396                                         NULL,
5397                                         "Can't use the different RSS types "
5398                                         "or level in the same flow");
5399                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5400                                 queue_index = (*sample_rss)->queue[0];
5401                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5402                         ++actions_n;
5403                         break;
5404                 case RTE_FLOW_ACTION_TYPE_MARK:
5405                         ret = flow_dv_validate_action_mark(dev, act,
5406                                                            sub_action_flags,
5407                                                            attr, error);
5408                         if (ret < 0)
5409                                 return ret;
5410                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5411                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5412                                                 MLX5_FLOW_ACTION_MARK_EXT;
5413                         else
5414                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5415                         ++actions_n;
5416                         break;
5417                 case RTE_FLOW_ACTION_TYPE_COUNT:
5418                         ret = flow_dv_validate_action_count
5419                                 (dev, is_shared_action_count(act),
5420                                  *action_flags | sub_action_flags,
5421                                  error);
5422                         if (ret < 0)
5423                                 return ret;
5424                         *count = act->conf;
5425                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5426                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5427                         ++actions_n;
5428                         break;
5429                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5430                         ret = flow_dv_validate_action_port_id(dev,
5431                                                               sub_action_flags,
5432                                                               act,
5433                                                               attr,
5434                                                               error);
5435                         if (ret)
5436                                 return ret;
5437                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5438                         ++actions_n;
5439                         break;
5440                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5441                         ret = flow_dv_validate_action_raw_encap_decap
5442                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5443                                  &actions_n, action, item_flags, error);
5444                         if (ret < 0)
5445                                 return ret;
5446                         ++actions_n;
5447                         break;
5448                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5449                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5450                         ret = flow_dv_validate_action_l2_encap(dev,
5451                                                                sub_action_flags,
5452                                                                act, attr,
5453                                                                error);
5454                         if (ret < 0)
5455                                 return ret;
5456                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5457                         ++actions_n;
5458                         break;
5459                 default:
5460                         return rte_flow_error_set(error, ENOTSUP,
5461                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5462                                                   NULL,
5463                                                   "Doesn't support optional "
5464                                                   "action");
5465                 }
5466         }
5467         if (attr->ingress && !attr->transfer) {
5468                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5469                                           MLX5_FLOW_ACTION_RSS)))
5470                         return rte_flow_error_set(error, EINVAL,
5471                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5472                                                   NULL,
5473                                                   "Ingress must has a dest "
5474                                                   "QUEUE for Sample");
5475         } else if (attr->egress && !attr->transfer) {
5476                 return rte_flow_error_set(error, ENOTSUP,
5477                                           RTE_FLOW_ERROR_TYPE_ACTION,
5478                                           NULL,
5479                                           "Sample Only support Ingress "
5480                                           "or E-Switch");
5481         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5482                 MLX5_ASSERT(attr->transfer);
5483                 if (sample->ratio > 1)
5484                         return rte_flow_error_set(error, ENOTSUP,
5485                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5486                                                   NULL,
5487                                                   "E-Switch doesn't support "
5488                                                   "any optional action "
5489                                                   "for sampling");
5490                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5491                         return rte_flow_error_set(error, ENOTSUP,
5492                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5493                                                   NULL,
5494                                                   "unsupported action QUEUE");
5495                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5496                         return rte_flow_error_set(error, ENOTSUP,
5497                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5498                                                   NULL,
5499                                                   "unsupported action QUEUE");
5500                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5501                         return rte_flow_error_set(error, EINVAL,
5502                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5503                                                   NULL,
5504                                                   "E-Switch must has a dest "
5505                                                   "port for mirroring");
5506                 if (!priv->config.hca_attr.reg_c_preserve &&
5507                      priv->representor_id != -1)
5508                         *fdb_mirror_limit = 1;
5509         }
5510         /* Continue validation for Xcap actions.*/
5511         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5512             (queue_index == 0xFFFF ||
5513              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5514                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5515                      MLX5_FLOW_XCAP_ACTIONS)
5516                         return rte_flow_error_set(error, ENOTSUP,
5517                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5518                                                   NULL, "encap and decap "
5519                                                   "combination aren't "
5520                                                   "supported");
5521                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5522                                                         MLX5_FLOW_ACTION_ENCAP))
5523                         return rte_flow_error_set(error, ENOTSUP,
5524                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5525                                                   NULL, "encap is not supported"
5526                                                   " for ingress traffic");
5527         }
5528         return 0;
5529 }
5530
5531 /**
5532  * Find existing modify-header resource or create and register a new one.
5533  *
5534  * @param dev[in, out]
5535  *   Pointer to rte_eth_dev structure.
5536  * @param[in, out] resource
5537  *   Pointer to modify-header resource.
5538  * @parm[in, out] dev_flow
5539  *   Pointer to the dev_flow.
5540  * @param[out] error
5541  *   pointer to error structure.
5542  *
5543  * @return
5544  *   0 on success otherwise -errno and errno is set.
5545  */
5546 static int
5547 flow_dv_modify_hdr_resource_register
5548                         (struct rte_eth_dev *dev,
5549                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5550                          struct mlx5_flow *dev_flow,
5551                          struct rte_flow_error *error)
5552 {
5553         struct mlx5_priv *priv = dev->data->dev_private;
5554         struct mlx5_dev_ctx_shared *sh = priv->sh;
5555         uint32_t key_len = sizeof(*resource) -
5556                            offsetof(typeof(*resource), ft_type) +
5557                            resource->actions_num * sizeof(resource->actions[0]);
5558         struct mlx5_hlist_entry *entry;
5559         struct mlx5_flow_cb_ctx ctx = {
5560                 .error = error,
5561                 .data = resource,
5562         };
5563         uint64_t key64;
5564
5565         resource->flags = dev_flow->dv.group ? 0 :
5566                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5567         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5568                                     resource->flags))
5569                 return rte_flow_error_set(error, EOVERFLOW,
5570                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5571                                           "too many modify header items");
5572         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5573         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5574         if (!entry)
5575                 return -rte_errno;
5576         resource = container_of(entry, typeof(*resource), entry);
5577         dev_flow->handle->dvh.modify_hdr = resource;
5578         return 0;
5579 }
5580
5581 /**
5582  * Get DV flow counter by index.
5583  *
5584  * @param[in] dev
5585  *   Pointer to the Ethernet device structure.
5586  * @param[in] idx
5587  *   mlx5 flow counter index in the container.
5588  * @param[out] ppool
5589  *   mlx5 flow counter pool in the container.
5590  *
5591  * @return
5592  *   Pointer to the counter, NULL otherwise.
5593  */
5594 static struct mlx5_flow_counter *
5595 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5596                            uint32_t idx,
5597                            struct mlx5_flow_counter_pool **ppool)
5598 {
5599         struct mlx5_priv *priv = dev->data->dev_private;
5600         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5601         struct mlx5_flow_counter_pool *pool;
5602
5603         /* Decrease to original index and clear shared bit. */
5604         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5605         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5606         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5607         MLX5_ASSERT(pool);
5608         if (ppool)
5609                 *ppool = pool;
5610         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5611 }
5612
5613 /**
5614  * Check the devx counter belongs to the pool.
5615  *
5616  * @param[in] pool
5617  *   Pointer to the counter pool.
5618  * @param[in] id
5619  *   The counter devx ID.
5620  *
5621  * @return
5622  *   True if counter belongs to the pool, false otherwise.
5623  */
5624 static bool
5625 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5626 {
5627         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5628                    MLX5_COUNTERS_PER_POOL;
5629
5630         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5631                 return true;
5632         return false;
5633 }
5634
5635 /**
5636  * Get a pool by devx counter ID.
5637  *
5638  * @param[in] cmng
5639  *   Pointer to the counter management.
5640  * @param[in] id
5641  *   The counter devx ID.
5642  *
5643  * @return
5644  *   The counter pool pointer if exists, NULL otherwise,
5645  */
5646 static struct mlx5_flow_counter_pool *
5647 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5648 {
5649         uint32_t i;
5650         struct mlx5_flow_counter_pool *pool = NULL;
5651
5652         rte_spinlock_lock(&cmng->pool_update_sl);
5653         /* Check last used pool. */
5654         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5655             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5656                 pool = cmng->pools[cmng->last_pool_idx];
5657                 goto out;
5658         }
5659         /* ID out of range means no suitable pool in the container. */
5660         if (id > cmng->max_id || id < cmng->min_id)
5661                 goto out;
5662         /*
5663          * Find the pool from the end of the container, since mostly counter
5664          * ID is sequence increasing, and the last pool should be the needed
5665          * one.
5666          */
5667         i = cmng->n_valid;
5668         while (i--) {
5669                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5670
5671                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5672                         pool = pool_tmp;
5673                         break;
5674                 }
5675         }
5676 out:
5677         rte_spinlock_unlock(&cmng->pool_update_sl);
5678         return pool;
5679 }
5680
5681 /**
5682  * Resize a counter container.
5683  *
5684  * @param[in] dev
5685  *   Pointer to the Ethernet device structure.
5686  *
5687  * @return
5688  *   0 on success, otherwise negative errno value and rte_errno is set.
5689  */
5690 static int
5691 flow_dv_container_resize(struct rte_eth_dev *dev)
5692 {
5693         struct mlx5_priv *priv = dev->data->dev_private;
5694         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5695         void *old_pools = cmng->pools;
5696         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5697         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5698         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5699
5700         if (!pools) {
5701                 rte_errno = ENOMEM;
5702                 return -ENOMEM;
5703         }
5704         if (old_pools)
5705                 memcpy(pools, old_pools, cmng->n *
5706                                        sizeof(struct mlx5_flow_counter_pool *));
5707         cmng->n = resize;
5708         cmng->pools = pools;
5709         if (old_pools)
5710                 mlx5_free(old_pools);
5711         return 0;
5712 }
5713
5714 /**
5715  * Query a devx flow counter.
5716  *
5717  * @param[in] dev
5718  *   Pointer to the Ethernet device structure.
5719  * @param[in] counter
5720  *   Index to the flow counter.
5721  * @param[out] pkts
5722  *   The statistics value of packets.
5723  * @param[out] bytes
5724  *   The statistics value of bytes.
5725  *
5726  * @return
5727  *   0 on success, otherwise a negative errno value and rte_errno is set.
5728  */
5729 static inline int
5730 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5731                      uint64_t *bytes)
5732 {
5733         struct mlx5_priv *priv = dev->data->dev_private;
5734         struct mlx5_flow_counter_pool *pool = NULL;
5735         struct mlx5_flow_counter *cnt;
5736         int offset;
5737
5738         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5739         MLX5_ASSERT(pool);
5740         if (priv->sh->cmng.counter_fallback)
5741                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5742                                         0, pkts, bytes, 0, NULL, NULL, 0);
5743         rte_spinlock_lock(&pool->sl);
5744         if (!pool->raw) {
5745                 *pkts = 0;
5746                 *bytes = 0;
5747         } else {
5748                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5749                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5750                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5751         }
5752         rte_spinlock_unlock(&pool->sl);
5753         return 0;
5754 }
5755
5756 /**
5757  * Create and initialize a new counter pool.
5758  *
5759  * @param[in] dev
5760  *   Pointer to the Ethernet device structure.
5761  * @param[out] dcs
5762  *   The devX counter handle.
5763  * @param[in] age
5764  *   Whether the pool is for counter that was allocated for aging.
5765  * @param[in/out] cont_cur
5766  *   Pointer to the container pointer, it will be update in pool resize.
5767  *
5768  * @return
5769  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
5770  */
5771 static struct mlx5_flow_counter_pool *
5772 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5773                     uint32_t age)
5774 {
5775         struct mlx5_priv *priv = dev->data->dev_private;
5776         struct mlx5_flow_counter_pool *pool;
5777         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5778         bool fallback = priv->sh->cmng.counter_fallback;
5779         uint32_t size = sizeof(*pool);
5780
5781         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5782         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5783         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5784         if (!pool) {
5785                 rte_errno = ENOMEM;
5786                 return NULL;
5787         }
5788         pool->raw = NULL;
5789         pool->is_aged = !!age;
5790         pool->query_gen = 0;
5791         pool->min_dcs = dcs;
5792         rte_spinlock_init(&pool->sl);
5793         rte_spinlock_init(&pool->csl);
5794         TAILQ_INIT(&pool->counters[0]);
5795         TAILQ_INIT(&pool->counters[1]);
5796         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5797         rte_spinlock_lock(&cmng->pool_update_sl);
5798         pool->index = cmng->n_valid;
5799         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5800                 mlx5_free(pool);
5801                 rte_spinlock_unlock(&cmng->pool_update_sl);
5802                 return NULL;
5803         }
5804         cmng->pools[pool->index] = pool;
5805         cmng->n_valid++;
5806         if (unlikely(fallback)) {
5807                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5808
5809                 if (base < cmng->min_id)
5810                         cmng->min_id = base;
5811                 if (base > cmng->max_id)
5812                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5813                 cmng->last_pool_idx = pool->index;
5814         }
5815         rte_spinlock_unlock(&cmng->pool_update_sl);
5816         return pool;
5817 }
5818
5819 /**
5820  * Prepare a new counter and/or a new counter pool.
5821  *
5822  * @param[in] dev
5823  *   Pointer to the Ethernet device structure.
5824  * @param[out] cnt_free
5825  *   Where to put the pointer of a new counter.
5826  * @param[in] age
5827  *   Whether the pool is for counter that was allocated for aging.
5828  *
5829  * @return
5830  *   The counter pool pointer and @p cnt_free is set on success,
5831  *   NULL otherwise and rte_errno is set.
5832  */
5833 static struct mlx5_flow_counter_pool *
5834 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5835                              struct mlx5_flow_counter **cnt_free,
5836                              uint32_t age)
5837 {
5838         struct mlx5_priv *priv = dev->data->dev_private;
5839         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5840         struct mlx5_flow_counter_pool *pool;
5841         struct mlx5_counters tmp_tq;
5842         struct mlx5_devx_obj *dcs = NULL;
5843         struct mlx5_flow_counter *cnt;
5844         enum mlx5_counter_type cnt_type =
5845                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5846         bool fallback = priv->sh->cmng.counter_fallback;
5847         uint32_t i;
5848
5849         if (fallback) {
5850                 /* bulk_bitmap must be 0 for single counter allocation. */
5851                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5852                 if (!dcs)
5853                         return NULL;
5854                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5855                 if (!pool) {
5856                         pool = flow_dv_pool_create(dev, dcs, age);
5857                         if (!pool) {
5858                                 mlx5_devx_cmd_destroy(dcs);
5859                                 return NULL;
5860                         }
5861                 }
5862                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5863                 cnt = MLX5_POOL_GET_CNT(pool, i);
5864                 cnt->pool = pool;
5865                 cnt->dcs_when_free = dcs;
5866                 *cnt_free = cnt;
5867                 return pool;
5868         }
5869         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5870         if (!dcs) {
5871                 rte_errno = ENODATA;
5872                 return NULL;
5873         }
5874         pool = flow_dv_pool_create(dev, dcs, age);
5875         if (!pool) {
5876                 mlx5_devx_cmd_destroy(dcs);
5877                 return NULL;
5878         }
5879         TAILQ_INIT(&tmp_tq);
5880         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5881                 cnt = MLX5_POOL_GET_CNT(pool, i);
5882                 cnt->pool = pool;
5883                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5884         }
5885         rte_spinlock_lock(&cmng->csl[cnt_type]);
5886         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5887         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5888         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5889         (*cnt_free)->pool = pool;
5890         return pool;
5891 }
5892
5893 /**
5894  * Allocate a flow counter.
5895  *
5896  * @param[in] dev
5897  *   Pointer to the Ethernet device structure.
5898  * @param[in] age
5899  *   Whether the counter was allocated for aging.
5900  *
5901  * @return
5902  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5903  */
5904 static uint32_t
5905 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5906 {
5907         struct mlx5_priv *priv = dev->data->dev_private;
5908         struct mlx5_flow_counter_pool *pool = NULL;
5909         struct mlx5_flow_counter *cnt_free = NULL;
5910         bool fallback = priv->sh->cmng.counter_fallback;
5911         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5912         enum mlx5_counter_type cnt_type =
5913                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5914         uint32_t cnt_idx;
5915
5916         if (!priv->config.devx) {
5917                 rte_errno = ENOTSUP;
5918                 return 0;
5919         }
5920         /* Get free counters from container. */
5921         rte_spinlock_lock(&cmng->csl[cnt_type]);
5922         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5923         if (cnt_free)
5924                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5925         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5926         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5927                 goto err;
5928         pool = cnt_free->pool;
5929         if (fallback)
5930                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5931         /* Create a DV counter action only in the first time usage. */
5932         if (!cnt_free->action) {
5933                 uint16_t offset;
5934                 struct mlx5_devx_obj *dcs;
5935                 int ret;
5936
5937                 if (!fallback) {
5938                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5939                         dcs = pool->min_dcs;
5940                 } else {
5941                         offset = 0;
5942                         dcs = cnt_free->dcs_when_free;
5943                 }
5944                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5945                                                             &cnt_free->action);
5946                 if (ret) {
5947                         rte_errno = errno;
5948                         goto err;
5949                 }
5950         }
5951         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5952                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5953         /* Update the counter reset values. */
5954         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5955                                  &cnt_free->bytes))
5956                 goto err;
5957         if (!fallback && !priv->sh->cmng.query_thread_on)
5958                 /* Start the asynchronous batch query by the host thread. */
5959                 mlx5_set_query_alarm(priv->sh);
5960         /*
5961          * When the count action isn't shared (by ID), shared_info field is
5962          * used for indirect action API's refcnt.
5963          * When the counter action is not shared neither by ID nor by indirect
5964          * action API, shared info must be 1.
5965          */
5966         cnt_free->shared_info.refcnt = 1;
5967         return cnt_idx;
5968 err:
5969         if (cnt_free) {
5970                 cnt_free->pool = pool;
5971                 if (fallback)
5972                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5973                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5974                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5975                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5976         }
5977         return 0;
5978 }
5979
5980 /**
5981  * Allocate a shared flow counter.
5982  *
5983  * @param[in] ctx
5984  *   Pointer to the shared counter configuration.
5985  * @param[in] data
5986  *   Pointer to save the allocated counter index.
5987  *
5988  * @return
5989  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5990  */
5991
5992 static int32_t
5993 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5994 {
5995         struct mlx5_shared_counter_conf *conf = ctx;
5996         struct rte_eth_dev *dev = conf->dev;
5997         struct mlx5_flow_counter *cnt;
5998
5999         data->dword = flow_dv_counter_alloc(dev, 0);
6000         data->dword |= MLX5_CNT_SHARED_OFFSET;
6001         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6002         cnt->shared_info.id = conf->id;
6003         return 0;
6004 }
6005
6006 /**
6007  * Get a shared flow counter.
6008  *
6009  * @param[in] dev
6010  *   Pointer to the Ethernet device structure.
6011  * @param[in] id
6012  *   Counter identifier.
6013  *
6014  * @return
6015  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6016  */
6017 static uint32_t
6018 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6019 {
6020         struct mlx5_priv *priv = dev->data->dev_private;
6021         struct mlx5_shared_counter_conf conf = {
6022                 .dev = dev,
6023                 .id = id,
6024         };
6025         union mlx5_l3t_data data = {
6026                 .dword = 0,
6027         };
6028
6029         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6030                                flow_dv_counter_alloc_shared_cb, &conf);
6031         return data.dword;
6032 }
6033
6034 /**
6035  * Get age param from counter index.
6036  *
6037  * @param[in] dev
6038  *   Pointer to the Ethernet device structure.
6039  * @param[in] counter
6040  *   Index to the counter handler.
6041  *
6042  * @return
6043  *   The aging parameter specified for the counter index.
6044  */
6045 static struct mlx5_age_param*
6046 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6047                                 uint32_t counter)
6048 {
6049         struct mlx5_flow_counter *cnt;
6050         struct mlx5_flow_counter_pool *pool = NULL;
6051
6052         flow_dv_counter_get_by_idx(dev, counter, &pool);
6053         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6054         cnt = MLX5_POOL_GET_CNT(pool, counter);
6055         return MLX5_CNT_TO_AGE(cnt);
6056 }
6057
6058 /**
6059  * Remove a flow counter from aged counter list.
6060  *
6061  * @param[in] dev
6062  *   Pointer to the Ethernet device structure.
6063  * @param[in] counter
6064  *   Index to the counter handler.
6065  * @param[in] cnt
6066  *   Pointer to the counter handler.
6067  */
6068 static void
6069 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6070                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6071 {
6072         struct mlx5_age_info *age_info;
6073         struct mlx5_age_param *age_param;
6074         struct mlx5_priv *priv = dev->data->dev_private;
6075         uint16_t expected = AGE_CANDIDATE;
6076
6077         age_info = GET_PORT_AGE_INFO(priv);
6078         age_param = flow_dv_counter_idx_get_age(dev, counter);
6079         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6080                                          AGE_FREE, false, __ATOMIC_RELAXED,
6081                                          __ATOMIC_RELAXED)) {
6082                 /**
6083                  * We need the lock even it is age timeout,
6084                  * since counter may still in process.
6085                  */
6086                 rte_spinlock_lock(&age_info->aged_sl);
6087                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6088                 rte_spinlock_unlock(&age_info->aged_sl);
6089                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6090         }
6091 }
6092
6093 /**
6094  * Release a flow counter.
6095  *
6096  * @param[in] dev
6097  *   Pointer to the Ethernet device structure.
6098  * @param[in] counter
6099  *   Index to the counter handler.
6100  */
6101 static void
6102 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6103 {
6104         struct mlx5_priv *priv = dev->data->dev_private;
6105         struct mlx5_flow_counter_pool *pool = NULL;
6106         struct mlx5_flow_counter *cnt;
6107         enum mlx5_counter_type cnt_type;
6108
6109         if (!counter)
6110                 return;
6111         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6112         MLX5_ASSERT(pool);
6113         /*
6114          * If the counter action is shared by ID, the l3t_clear_entry function
6115          * reduces its references counter. If after the reduction the action is
6116          * still referenced, the function returns here and does not release it.
6117          */
6118         if (IS_LEGACY_SHARED_CNT(counter) &&
6119             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
6120                 return;
6121         /*
6122          * If the counter action is shared by indirect action API, the atomic
6123          * function reduces its references counter. If after the reduction the
6124          * action is still referenced, the function returns here and does not
6125          * release it.
6126          * When the counter action is not shared neither by ID nor by indirect
6127          * action API, shared info is 1 before the reduction, so this condition
6128          * is failed and function doesn't return here.
6129          */
6130         if (!IS_LEGACY_SHARED_CNT(counter) &&
6131             __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, __ATOMIC_RELAXED))
6132                 return;
6133         if (pool->is_aged)
6134                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6135         cnt->pool = pool;
6136         /*
6137          * Put the counter back to list to be updated in none fallback mode.
6138          * Currently, we are using two list alternately, while one is in query,
6139          * add the freed counter to the other list based on the pool query_gen
6140          * value. After query finishes, add counter the list to the global
6141          * container counter list. The list changes while query starts. In
6142          * this case, lock will not be needed as query callback and release
6143          * function both operate with the different list.
6144          */
6145         if (!priv->sh->cmng.counter_fallback) {
6146                 rte_spinlock_lock(&pool->csl);
6147                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6148                 rte_spinlock_unlock(&pool->csl);
6149         } else {
6150                 cnt->dcs_when_free = cnt->dcs_when_active;
6151                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6152                                            MLX5_COUNTER_TYPE_ORIGIN;
6153                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6154                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6155                                   cnt, next);
6156                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6157         }
6158 }
6159
6160 /**
6161  * Resize a meter id container.
6162  *
6163  * @param[in] dev
6164  *   Pointer to the Ethernet device structure.
6165  *
6166  * @return
6167  *   0 on success, otherwise negative errno value and rte_errno is set.
6168  */
6169 static int
6170 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6171 {
6172         struct mlx5_priv *priv = dev->data->dev_private;
6173         struct mlx5_aso_mtr_pools_mng *pools_mng =
6174                                 &priv->sh->mtrmng->pools_mng;
6175         void *old_pools = pools_mng->pools;
6176         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6177         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6178         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6179
6180         if (!pools) {
6181                 rte_errno = ENOMEM;
6182                 return -ENOMEM;
6183         }
6184         if (!pools_mng->n)
6185                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6186                         mlx5_free(pools);
6187                         return -ENOMEM;
6188                 }
6189         if (old_pools)
6190                 memcpy(pools, old_pools, pools_mng->n *
6191                                        sizeof(struct mlx5_aso_mtr_pool *));
6192         pools_mng->n = resize;
6193         pools_mng->pools = pools;
6194         if (old_pools)
6195                 mlx5_free(old_pools);
6196         return 0;
6197 }
6198
6199 /**
6200  * Prepare a new meter and/or a new meter pool.
6201  *
6202  * @param[in] dev
6203  *   Pointer to the Ethernet device structure.
6204  * @param[out] mtr_free
6205  *   Where to put the pointer of a new meter.g.
6206  *
6207  * @return
6208  *   The meter pool pointer and @mtr_free is set on success,
6209  *   NULL otherwise and rte_errno is set.
6210  */
6211 static struct mlx5_aso_mtr_pool *
6212 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6213                              struct mlx5_aso_mtr **mtr_free)
6214 {
6215         struct mlx5_priv *priv = dev->data->dev_private;
6216         struct mlx5_aso_mtr_pools_mng *pools_mng =
6217                                 &priv->sh->mtrmng->pools_mng;
6218         struct mlx5_aso_mtr_pool *pool = NULL;
6219         struct mlx5_devx_obj *dcs = NULL;
6220         uint32_t i;
6221         uint32_t log_obj_size;
6222
6223         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6224         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6225                         priv->sh->pdn, log_obj_size);
6226         if (!dcs) {
6227                 rte_errno = ENODATA;
6228                 return NULL;
6229         }
6230         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6231         if (!pool) {
6232                 rte_errno = ENOMEM;
6233                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6234                 return NULL;
6235         }
6236         pool->devx_obj = dcs;
6237         pool->index = pools_mng->n_valid;
6238         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6239                 mlx5_free(pool);
6240                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6241                 return NULL;
6242         }
6243         pools_mng->pools[pool->index] = pool;
6244         pools_mng->n_valid++;
6245         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6246                 pool->mtrs[i].offset = i;
6247                 LIST_INSERT_HEAD(&pools_mng->meters,
6248                                                 &pool->mtrs[i], next);
6249         }
6250         pool->mtrs[0].offset = 0;
6251         *mtr_free = &pool->mtrs[0];
6252         return pool;
6253 }
6254
6255 /**
6256  * Release a flow meter into pool.
6257  *
6258  * @param[in] dev
6259  *   Pointer to the Ethernet device structure.
6260  * @param[in] mtr_idx
6261  *   Index to aso flow meter.
6262  */
6263 static void
6264 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6265 {
6266         struct mlx5_priv *priv = dev->data->dev_private;
6267         struct mlx5_aso_mtr_pools_mng *pools_mng =
6268                                 &priv->sh->mtrmng->pools_mng;
6269         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6270
6271         MLX5_ASSERT(aso_mtr);
6272         rte_spinlock_lock(&pools_mng->mtrsl);
6273         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6274         aso_mtr->state = ASO_METER_FREE;
6275         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6276         rte_spinlock_unlock(&pools_mng->mtrsl);
6277 }
6278
6279 /**
6280  * Allocate a aso flow meter.
6281  *
6282  * @param[in] dev
6283  *   Pointer to the Ethernet device structure.
6284  *
6285  * @return
6286  *   Index to aso flow meter on success, 0 otherwise and rte_errno is set.
6287  */
6288 static uint32_t
6289 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6290 {
6291         struct mlx5_priv *priv = dev->data->dev_private;
6292         struct mlx5_aso_mtr *mtr_free = NULL;
6293         struct mlx5_aso_mtr_pools_mng *pools_mng =
6294                                 &priv->sh->mtrmng->pools_mng;
6295         struct mlx5_aso_mtr_pool *pool;
6296         uint32_t mtr_idx = 0;
6297
6298         if (!priv->config.devx) {
6299                 rte_errno = ENOTSUP;
6300                 return 0;
6301         }
6302         /* Allocate the flow meter memory. */
6303         /* Get free meters from management. */
6304         rte_spinlock_lock(&pools_mng->mtrsl);
6305         mtr_free = LIST_FIRST(&pools_mng->meters);
6306         if (mtr_free)
6307                 LIST_REMOVE(mtr_free, next);
6308         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6309                 rte_spinlock_unlock(&pools_mng->mtrsl);
6310                 return 0;
6311         }
6312         mtr_free->state = ASO_METER_WAIT;
6313         rte_spinlock_unlock(&pools_mng->mtrsl);
6314         pool = container_of(mtr_free,
6315                         struct mlx5_aso_mtr_pool,
6316                         mtrs[mtr_free->offset]);
6317         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6318         if (!mtr_free->fm.meter_action) {
6319 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6320                 struct rte_flow_error error;
6321                 uint8_t reg_id;
6322
6323                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6324                 mtr_free->fm.meter_action =
6325                         mlx5_glue->dv_create_flow_action_aso
6326                                                 (priv->sh->rx_domain,
6327                                                  pool->devx_obj->obj,
6328                                                  mtr_free->offset,
6329                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6330                                                  reg_id - REG_C_0);
6331 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6332                 if (!mtr_free->fm.meter_action) {
6333                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6334                         return 0;
6335                 }
6336         }
6337         return mtr_idx;
6338 }
6339
6340 /**
6341  * Verify the @p attributes will be correctly understood by the NIC and store
6342  * them in the @p flow if everything is correct.
6343  *
6344  * @param[in] dev
6345  *   Pointer to dev struct.
6346  * @param[in] attributes
6347  *   Pointer to flow attributes
6348  * @param[in] external
6349  *   This flow rule is created by request external to PMD.
6350  * @param[out] error
6351  *   Pointer to error structure.
6352  *
6353  * @return
6354  *   - 0 on success and non root table.
6355  *   - 1 on success and root table.
6356  *   - a negative errno value otherwise and rte_errno is set.
6357  */
6358 static int
6359 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6360                             const struct mlx5_flow_tunnel *tunnel,
6361                             const struct rte_flow_attr *attributes,
6362                             const struct flow_grp_info *grp_info,
6363                             struct rte_flow_error *error)
6364 {
6365         struct mlx5_priv *priv = dev->data->dev_private;
6366         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6367         int ret = 0;
6368
6369 #ifndef HAVE_MLX5DV_DR
6370         RTE_SET_USED(tunnel);
6371         RTE_SET_USED(grp_info);
6372         if (attributes->group)
6373                 return rte_flow_error_set(error, ENOTSUP,
6374                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6375                                           NULL,
6376                                           "groups are not supported");
6377 #else
6378         uint32_t table = 0;
6379
6380         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6381                                        grp_info, error);
6382         if (ret)
6383                 return ret;
6384         if (!table)
6385                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6386 #endif
6387         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6388             attributes->priority > lowest_priority)
6389                 return rte_flow_error_set(error, ENOTSUP,
6390                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6391                                           NULL,
6392                                           "priority out of range");
6393         if (attributes->transfer) {
6394                 if (!priv->config.dv_esw_en)
6395                         return rte_flow_error_set
6396                                 (error, ENOTSUP,
6397                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6398                                  "E-Switch dr is not supported");
6399                 if (!(priv->representor || priv->master))
6400                         return rte_flow_error_set
6401                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6402                                  NULL, "E-Switch configuration can only be"
6403                                  " done by a master or a representor device");
6404                 if (attributes->egress)
6405                         return rte_flow_error_set
6406                                 (error, ENOTSUP,
6407                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6408                                  "egress is not supported");
6409         }
6410         if (!(attributes->egress ^ attributes->ingress))
6411                 return rte_flow_error_set(error, ENOTSUP,
6412                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6413                                           "must specify exactly one of "
6414                                           "ingress or egress");
6415         return ret;
6416 }
6417
6418 static uint16_t
6419 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6420                           const struct rte_flow_item *end)
6421 {
6422         const struct rte_flow_item *item = *head;
6423         uint16_t l3_protocol;
6424
6425         for (; item != end; item++) {
6426                 switch (item->type) {
6427                 default:
6428                         break;
6429                 case RTE_FLOW_ITEM_TYPE_IPV4:
6430                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6431                         goto l3_ok;
6432                 case RTE_FLOW_ITEM_TYPE_IPV6:
6433                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6434                         goto l3_ok;
6435                 case RTE_FLOW_ITEM_TYPE_ETH:
6436                         if (item->mask && item->spec) {
6437                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6438                                                             type, item,
6439                                                             l3_protocol);
6440                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6441                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6442                                         goto l3_ok;
6443                         }
6444                         break;
6445                 case RTE_FLOW_ITEM_TYPE_VLAN:
6446                         if (item->mask && item->spec) {
6447                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6448                                                             inner_type, item,
6449                                                             l3_protocol);
6450                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6451                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6452                                         goto l3_ok;
6453                         }
6454                         break;
6455                 }
6456         }
6457         return 0;
6458 l3_ok:
6459         *head = item;
6460         return l3_protocol;
6461 }
6462
6463 static uint8_t
6464 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6465                           const struct rte_flow_item *end)
6466 {
6467         const struct rte_flow_item *item = *head;
6468         uint8_t l4_protocol;
6469
6470         for (; item != end; item++) {
6471                 switch (item->type) {
6472                 default:
6473                         break;
6474                 case RTE_FLOW_ITEM_TYPE_TCP:
6475                         l4_protocol = IPPROTO_TCP;
6476                         goto l4_ok;
6477                 case RTE_FLOW_ITEM_TYPE_UDP:
6478                         l4_protocol = IPPROTO_UDP;
6479                         goto l4_ok;
6480                 case RTE_FLOW_ITEM_TYPE_IPV4:
6481                         if (item->mask && item->spec) {
6482                                 const struct rte_flow_item_ipv4 *mask, *spec;
6483
6484                                 mask = (typeof(mask))item->mask;
6485                                 spec = (typeof(spec))item->spec;
6486                                 l4_protocol = mask->hdr.next_proto_id &
6487                                               spec->hdr.next_proto_id;
6488                                 if (l4_protocol == IPPROTO_TCP ||
6489                                     l4_protocol == IPPROTO_UDP)
6490                                         goto l4_ok;
6491                         }
6492                         break;
6493                 case RTE_FLOW_ITEM_TYPE_IPV6:
6494                         if (item->mask && item->spec) {
6495                                 const struct rte_flow_item_ipv6 *mask, *spec;
6496                                 mask = (typeof(mask))item->mask;
6497                                 spec = (typeof(spec))item->spec;
6498                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6499                                 if (l4_protocol == IPPROTO_TCP ||
6500                                     l4_protocol == IPPROTO_UDP)
6501                                         goto l4_ok;
6502                         }
6503                         break;
6504                 }
6505         }
6506         return 0;
6507 l4_ok:
6508         *head = item;
6509         return l4_protocol;
6510 }
6511
6512 static int
6513 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6514                                 const struct rte_flow_item *rule_items,
6515                                 const struct rte_flow_item *integrity_item,
6516                                 struct rte_flow_error *error)
6517 {
6518         struct mlx5_priv *priv = dev->data->dev_private;
6519         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6520         const struct rte_flow_item_integrity *mask = (typeof(mask))
6521                                                      integrity_item->mask;
6522         const struct rte_flow_item_integrity *spec = (typeof(spec))
6523                                                      integrity_item->spec;
6524         uint32_t protocol;
6525
6526         if (!priv->config.hca_attr.pkt_integrity_match)
6527                 return rte_flow_error_set(error, ENOTSUP,
6528                                           RTE_FLOW_ERROR_TYPE_ITEM,
6529                                           integrity_item,
6530                                           "packet integrity integrity_item not supported");
6531         if (!mask)
6532                 mask = &rte_flow_item_integrity_mask;
6533         if (!mlx5_validate_integrity_item(mask))
6534                 return rte_flow_error_set(error, ENOTSUP,
6535                                           RTE_FLOW_ERROR_TYPE_ITEM,
6536                                           integrity_item,
6537                                           "unsupported integrity filter");
6538         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6539         if (spec->level > 1) {
6540                 if (!tunnel_item)
6541                         return rte_flow_error_set(error, ENOTSUP,
6542                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6543                                                   integrity_item,
6544                                                   "missing tunnel item");
6545                 item = tunnel_item;
6546                 end_item = mlx5_find_end_item(tunnel_item);
6547         } else {
6548                 end_item = tunnel_item ? tunnel_item :
6549                            mlx5_find_end_item(integrity_item);
6550         }
6551         if (mask->l3_ok || mask->ipv4_csum_ok) {
6552                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6553                 if (!protocol)
6554                         return rte_flow_error_set(error, EINVAL,
6555                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6556                                                   integrity_item,
6557                                                   "missing L3 protocol");
6558         }
6559         if (mask->l4_ok || mask->l4_csum_ok) {
6560                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6561                 if (!protocol)
6562                         return rte_flow_error_set(error, EINVAL,
6563                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6564                                                   integrity_item,
6565                                                   "missing L4 protocol");
6566         }
6567         return 0;
6568 }
6569
6570 /**
6571  * Internal validation function. For validating both actions and items.
6572  *
6573  * @param[in] dev
6574  *   Pointer to the rte_eth_dev structure.
6575  * @param[in] attr
6576  *   Pointer to the flow attributes.
6577  * @param[in] items
6578  *   Pointer to the list of items.
6579  * @param[in] actions
6580  *   Pointer to the list of actions.
6581  * @param[in] external
6582  *   This flow rule is created by request external to PMD.
6583  * @param[in] hairpin
6584  *   Number of hairpin TX actions, 0 means classic flow.
6585  * @param[out] error
6586  *   Pointer to the error structure.
6587  *
6588  * @return
6589  *   0 on success, a negative errno value otherwise and rte_errno is set.
6590  */
6591 static int
6592 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6593                  const struct rte_flow_item items[],
6594                  const struct rte_flow_action actions[],
6595                  bool external, int hairpin, struct rte_flow_error *error)
6596 {
6597         int ret;
6598         uint64_t action_flags = 0;
6599         uint64_t item_flags = 0;
6600         uint64_t last_item = 0;
6601         uint8_t next_protocol = 0xff;
6602         uint16_t ether_type = 0;
6603         int actions_n = 0;
6604         uint8_t item_ipv6_proto = 0;
6605         int fdb_mirror_limit = 0;
6606         int modify_after_mirror = 0;
6607         const struct rte_flow_item *geneve_item = NULL;
6608         const struct rte_flow_item *gre_item = NULL;
6609         const struct rte_flow_item *gtp_item = NULL;
6610         const struct rte_flow_action_raw_decap *decap;
6611         const struct rte_flow_action_raw_encap *encap;
6612         const struct rte_flow_action_rss *rss = NULL;
6613         const struct rte_flow_action_rss *sample_rss = NULL;
6614         const struct rte_flow_action_count *sample_count = NULL;
6615         const struct rte_flow_item_tcp nic_tcp_mask = {
6616                 .hdr = {
6617                         .tcp_flags = 0xFF,
6618                         .src_port = RTE_BE16(UINT16_MAX),
6619                         .dst_port = RTE_BE16(UINT16_MAX),
6620                 }
6621         };
6622         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6623                 .hdr = {
6624                         .src_addr =
6625                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6626                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6627                         .dst_addr =
6628                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6629                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6630                         .vtc_flow = RTE_BE32(0xffffffff),
6631                         .proto = 0xff,
6632                         .hop_limits = 0xff,
6633                 },
6634                 .has_frag_ext = 1,
6635         };
6636         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6637                 .hdr = {
6638                         .common = {
6639                                 .u32 =
6640                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6641                                         .type = 0xFF,
6642                                         }).u32),
6643                         },
6644                         .dummy[0] = 0xffffffff,
6645                 },
6646         };
6647         struct mlx5_priv *priv = dev->data->dev_private;
6648         struct mlx5_dev_config *dev_conf = &priv->config;
6649         uint16_t queue_index = 0xFFFF;
6650         const struct rte_flow_item_vlan *vlan_m = NULL;
6651         uint32_t rw_act_num = 0;
6652         uint64_t is_root;
6653         const struct mlx5_flow_tunnel *tunnel;
6654         enum mlx5_tof_rule_type tof_rule_type;
6655         struct flow_grp_info grp_info = {
6656                 .external = !!external,
6657                 .transfer = !!attr->transfer,
6658                 .fdb_def_rule = !!priv->fdb_def_rule,
6659                 .std_tbl_fix = true,
6660         };
6661         const struct rte_eth_hairpin_conf *conf;
6662         const struct rte_flow_item *rule_items = items;
6663         bool def_policy = false;
6664
6665         if (items == NULL)
6666                 return -1;
6667         tunnel = is_tunnel_offload_active(dev) ?
6668                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6669         if (tunnel) {
6670                 if (priv->representor)
6671                         return rte_flow_error_set
6672                                 (error, ENOTSUP,
6673                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6674                                  NULL, "decap not supported for VF representor");
6675                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6676                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6677                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6678                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6679                                         MLX5_FLOW_ACTION_DECAP;
6680                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6681                                         (dev, attr, tunnel, tof_rule_type);
6682         }
6683         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6684         if (ret < 0)
6685                 return ret;
6686         is_root = (uint64_t)ret;
6687         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6688                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6689                 int type = items->type;
6690
6691                 if (!mlx5_flow_os_item_supported(type))
6692                         return rte_flow_error_set(error, ENOTSUP,
6693                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6694                                                   NULL, "item not supported");
6695                 switch (type) {
6696                 case RTE_FLOW_ITEM_TYPE_VOID:
6697                         break;
6698                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6699                         ret = flow_dv_validate_item_port_id
6700                                         (dev, items, attr, item_flags, error);
6701                         if (ret < 0)
6702                                 return ret;
6703                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6704                         break;
6705                 case RTE_FLOW_ITEM_TYPE_ETH:
6706                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6707                                                           true, error);
6708                         if (ret < 0)
6709                                 return ret;
6710                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6711                                              MLX5_FLOW_LAYER_OUTER_L2;
6712                         if (items->mask != NULL && items->spec != NULL) {
6713                                 ether_type =
6714                                         ((const struct rte_flow_item_eth *)
6715                                          items->spec)->type;
6716                                 ether_type &=
6717                                         ((const struct rte_flow_item_eth *)
6718                                          items->mask)->type;
6719                                 ether_type = rte_be_to_cpu_16(ether_type);
6720                         } else {
6721                                 ether_type = 0;
6722                         }
6723                         break;
6724                 case RTE_FLOW_ITEM_TYPE_VLAN:
6725                         ret = flow_dv_validate_item_vlan(items, item_flags,
6726                                                          dev, error);
6727                         if (ret < 0)
6728                                 return ret;
6729                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6730                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6731                         if (items->mask != NULL && items->spec != NULL) {
6732                                 ether_type =
6733                                         ((const struct rte_flow_item_vlan *)
6734                                          items->spec)->inner_type;
6735                                 ether_type &=
6736                                         ((const struct rte_flow_item_vlan *)
6737                                          items->mask)->inner_type;
6738                                 ether_type = rte_be_to_cpu_16(ether_type);
6739                         } else {
6740                                 ether_type = 0;
6741                         }
6742                         /* Store outer VLAN mask for of_push_vlan action. */
6743                         if (!tunnel)
6744                                 vlan_m = items->mask;
6745                         break;
6746                 case RTE_FLOW_ITEM_TYPE_IPV4:
6747                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6748                                                   &item_flags, &tunnel);
6749                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6750                                                          last_item, ether_type,
6751                                                          error);
6752                         if (ret < 0)
6753                                 return ret;
6754                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6755                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6756                         if (items->mask != NULL &&
6757                             ((const struct rte_flow_item_ipv4 *)
6758                              items->mask)->hdr.next_proto_id) {
6759                                 next_protocol =
6760                                         ((const struct rte_flow_item_ipv4 *)
6761                                          (items->spec))->hdr.next_proto_id;
6762                                 next_protocol &=
6763                                         ((const struct rte_flow_item_ipv4 *)
6764                                          (items->mask))->hdr.next_proto_id;
6765                         } else {
6766                                 /* Reset for inner layer. */
6767                                 next_protocol = 0xff;
6768                         }
6769                         break;
6770                 case RTE_FLOW_ITEM_TYPE_IPV6:
6771                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6772                                                   &item_flags, &tunnel);
6773                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6774                                                            last_item,
6775                                                            ether_type,
6776                                                            &nic_ipv6_mask,
6777                                                            error);
6778                         if (ret < 0)
6779                                 return ret;
6780                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6781                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6782                         if (items->mask != NULL &&
6783                             ((const struct rte_flow_item_ipv6 *)
6784                              items->mask)->hdr.proto) {
6785                                 item_ipv6_proto =
6786                                         ((const struct rte_flow_item_ipv6 *)
6787                                          items->spec)->hdr.proto;
6788                                 next_protocol =
6789                                         ((const struct rte_flow_item_ipv6 *)
6790                                          items->spec)->hdr.proto;
6791                                 next_protocol &=
6792                                         ((const struct rte_flow_item_ipv6 *)
6793                                          items->mask)->hdr.proto;
6794                         } else {
6795                                 /* Reset for inner layer. */
6796                                 next_protocol = 0xff;
6797                         }
6798                         break;
6799                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6800                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6801                                                                   item_flags,
6802                                                                   error);
6803                         if (ret < 0)
6804                                 return ret;
6805                         last_item = tunnel ?
6806                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6807                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6808                         if (items->mask != NULL &&
6809                             ((const struct rte_flow_item_ipv6_frag_ext *)
6810                              items->mask)->hdr.next_header) {
6811                                 next_protocol =
6812                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6813                                  items->spec)->hdr.next_header;
6814                                 next_protocol &=
6815                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6816                                  items->mask)->hdr.next_header;
6817                         } else {
6818                                 /* Reset for inner layer. */
6819                                 next_protocol = 0xff;
6820                         }
6821                         break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			/* Validate TCP against the tracked IP next-protocol. */
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &nic_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			/* Remember the GRE item: GRE_KEY validation needs it. */
			gre_item = items;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_nvgre(items, item_flags,
							    next_protocol,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_NVGRE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
			/* Checked against the GRE item seen earlier. */
			ret = mlx5_flow_validate_item_gre_key
				(items, item_flags, gre_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE_KEY;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags, dev,
								error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			ret = mlx5_flow_validate_item_geneve(items,
							     item_flags, dev,
							     error);
			if (ret < 0)
				return ret;
			/* Remember the GENEVE item for GENEVE_OPT validation. */
			geneve_item = items;
			last_item = MLX5_FLOW_LAYER_GENEVE;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
			ret = mlx5_flow_validate_item_geneve_opt(items,
								 last_item,
								 geneve_item,
								 dev,
								 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
6907
		case RTE_FLOW_ITEM_TYPE_MARK:
			ret = flow_dv_validate_item_mark(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_MARK;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			ret = mlx5_flow_validate_item_icmp(items, item_flags,
							   next_protocol,
							   error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_ICMP;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			ret = mlx5_flow_validate_item_icmp6(items, item_flags,
							    next_protocol,
							    error);
			if (ret < 0)
				return ret;
			/*
			 * Record the ICMPv6 protocol: IPv6 header rewrite
			 * actions are rejected later when this is set.
			 */
			item_ipv6_proto = IPPROTO_ICMPV6;
			last_item = MLX5_FLOW_LAYER_ICMP6;
			break;
		case RTE_FLOW_ITEM_TYPE_TAG:
			ret = flow_dv_validate_item_tag(dev, items,
							attr, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_TAG;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
			/* PMD-internal items — accepted as-is here. */
			break;
		case RTE_FLOW_ITEM_TYPE_GTP:
			ret = flow_dv_validate_item_gtp(dev, items, item_flags,
							error);
			if (ret < 0)
				return ret;
			/* Remember the GTP item for GTP_PSC validation. */
			gtp_item = items;
			last_item = MLX5_FLOW_LAYER_GTP;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			ret = flow_dv_validate_item_gtp_psc(items, last_item,
							    gtp_item, attr,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GTP_PSC;
			break;
		case RTE_FLOW_ITEM_TYPE_ECPRI:
			/* Capacity will be checked in the translate stage. */
			ret = mlx5_flow_validate_item_ecpri(items, item_flags,
							    last_item,
							    ether_type,
							    &nic_ecpri_mask,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_ECPRI;
			break;
		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
			/* Only a single integrity item is allowed per rule. */
			if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
				return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ITEM,
					 NULL, "multiple integrity items not supported");
			ret = flow_dv_validate_item_integrity(dev, rule_items,
							      items, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_INTEGRITY;
			break;
		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
			/* May update item_flags itself, hence the pointer. */
			ret = flow_dv_validate_item_aso_ct(dev, items,
							   &item_flags, error);
			if (ret < 0)
				return ret;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
			/* tunnel offload item was processed before
			 * list it here as a supported type
			 */
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		/* Accumulate the layer/item bit chosen by this iteration. */
		item_flags |= last_item;
	}
	/*
	 * Validate each action in order. actions_n counts hardware actions
	 * (all modify-header actions together count as one), rw_act_num
	 * counts individual modify-header register writes, and
	 * modify_after_mirror is raised when a modifying action follows a
	 * sample action.
	 */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int type = actions->type;
		bool shared_count = false;

		if (!mlx5_flow_os_action_supported(type))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		/* A terminating meter policy must not be followed by more actions. */
		if (action_flags &
			MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
			return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "meter action with policy "
				"must be the last action");
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
							      action_flags,
							      actions,
							      attr,
							      error);
			if (ret)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = flow_dv_validate_action_flag(dev, action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/*
			 * In extended (non-legacy) metadata mode FLAG is
			 * implemented via a register write, so it is folded
			 * into the single modify-header action.
			 */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
				/* Count all modify-header actions as one. */
				if (!(action_flags &
				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
					++actions_n;
				action_flags |= MLX5_FLOW_ACTION_FLAG |
						MLX5_FLOW_ACTION_MARK_EXT;
				if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
					modify_after_mirror = 1;

			} else {
				action_flags |= MLX5_FLOW_ACTION_FLAG;
				++actions_n;
			}
			rw_act_num += MLX5_ACT_NUM_SET_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = flow_dv_validate_action_mark(dev, actions,
							   action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/* Same register-write folding as FLAG above. */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
				/* Count all modify-header actions as one. */
				if (!(action_flags &
				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
					++actions_n;
				action_flags |= MLX5_FLOW_ACTION_MARK |
						MLX5_FLOW_ACTION_MARK_EXT;
				if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
					modify_after_mirror = 1;
			} else {
				action_flags |= MLX5_FLOW_ACTION_MARK;
				++actions_n;
			}
			rw_act_num += MLX5_ACT_NUM_SET_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_META:
			ret = flow_dv_validate_action_set_meta(dev, actions,
							       action_flags,
							       attr, error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_SET_META;
			rw_act_num += MLX5_ACT_NUM_SET_META;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TAG:
			ret = flow_dv_validate_action_set_tag(dev, actions,
							      action_flags,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
			rw_act_num += MLX5_ACT_NUM_SET_TAG;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Remember the destination queue for later checks. */
			queue_index = ((const struct rte_flow_action_queue *)
							(actions->conf))->index;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			rss = actions->conf;
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			/*
			 * An RSS inside a sample action must agree with the
			 * flow-level RSS on both types and level.
			 */
			if (rss && sample_rss &&
			    (sample_rss->level != rss->level ||
			    sample_rss->types != rss->types))
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL,
					"Can't use the different RSS types "
					"or level in the same flow");
			if (rss != NULL && rss->queue_num)
				queue_index = rss->queue[0];
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
			ret =
			mlx5_flow_validate_action_default_miss(action_flags,
					attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
			++actions_n;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
		case RTE_FLOW_ACTION_TYPE_COUNT:
			/* Shared (indirect) counters are validated the same way. */
			shared_count = is_shared_action_count(actions);
			ret = flow_dv_validate_action_count(dev, shared_count,
							    action_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
			if (flow_dv_validate_action_pop_vlan(dev,
							     action_flags,
							     actions,
							     item_flags, attr,
							     error))
				return -rte_errno;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			ret = flow_dv_validate_action_push_vlan(dev,
								action_flags,
								vlan_m,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			ret = flow_dv_validate_action_set_vlan_pcp
						(action_flags, actions, error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			/* Count PCP with push_vlan command. */
			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			ret = flow_dv_validate_action_set_vlan_vid
						(item_flags, action_flags,
						 actions, error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			/* Count VID with push_vlan command. */
			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
			rw_act_num += MLX5_ACT_NUM_MDF_VID;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(dev,
							       action_flags,
							       actions, attr,
							       error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_decap(dev, action_flags,
							    actions, item_flags,
							    attr, error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* Standalone raw encap (no preceding raw decap). */
			ret = flow_dv_validate_action_raw_encap_decap
				(dev, NULL, actions->conf, attr, &action_flags,
				 &actions_n, actions, item_flags, error);
			if (ret < 0)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			decap = actions->conf;
			/*
			 * Look ahead past VOID actions: a raw decap directly
			 * followed by a raw encap is validated as a single
			 * decap/encap pair; otherwise rewind the iterator.
			 */
			while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
				;
			if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
				encap = NULL;
				actions--;
			} else {
				encap = actions->conf;
			}
			ret = flow_dv_validate_action_raw_encap_decap
					   (dev,
					    decap ? decap : &empty_decap, encap,
					    attr, &action_flags, &actions_n,
					    actions, item_flags, error);
			if (ret < 0)
				return ret;
			if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
			    (action_flags & MLX5_FLOW_ACTION_DECAP))
				modify_after_mirror = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
						MLX5_FLOW_ACTION_SET_MAC_SRC :
						MLX5_FLOW_ACTION_SET_MAC_DST;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			/*
			 * Even if the source and destination MAC addresses have
			 * overlap in the header with 4B alignment, the convert
			 * function will handle them separately and 4 SW actions
			 * will be created. And 2 actions will be added each
			 * time no matter how many bytes of address will be set.
			 */
			rw_act_num += MLX5_ACT_NUM_MDF_MAC;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
						MLX5_FLOW_ACTION_SET_IPV4_SRC :
						MLX5_FLOW_ACTION_SET_IPV4_DST;
			rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/*
			 * Rewriting the IPv6 header is refused when the flow
			 * matched ICMPv6 (item_ipv6_proto recorded in the
			 * item loop above).
			 */
			if (item_ipv6_proto == IPPROTO_ICMPV6)
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions,
					"Can't change header "
					"with ICMPv6 proto");
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
						MLX5_FLOW_ACTION_SET_IPV6_SRC :
						MLX5_FLOW_ACTION_SET_IPV6_DST;
			rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
								actions,
								item_flags,
								error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
						MLX5_FLOW_ACTION_SET_TP_SRC :
						MLX5_FLOW_ACTION_SET_TP_DST;
			rw_act_num += MLX5_ACT_NUM_MDF_PORT;
			break;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			ret = flow_dv_validate_action_modify_ttl(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TTL ?
						MLX5_FLOW_ACTION_SET_TTL :
						MLX5_FLOW_ACTION_DEC_TTL;
			rw_act_num += MLX5_ACT_NUM_MDF_TTL;
			break;
7375                 case RTE_FLOW_ACTION_TYPE_JUMP:
7376                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7377                                                            action_flags,
7378                                                            attr, external,
7379                                                            error);
7380                         if (ret)
7381                                 return ret;
7382                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7383                             fdb_mirror_limit)
7384                                 return rte_flow_error_set(error, EINVAL,
7385                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7386                                                   NULL,
7387                                                   "sample and jump action combination is not supported");
7388                         ++actions_n;
7389                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7390                         break;
7391                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7392                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7393                         ret = flow_dv_validate_action_modify_tcp_seq
7394                                                                 (action_flags,
7395                                                                  actions,
7396                                                                  item_flags,
7397                                                                  error);
7398                         if (ret < 0)
7399                                 return ret;
7400                         /* Count all modify-header actions as one action. */
7401                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7402                                 ++actions_n;
7403                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7404                                 modify_after_mirror = 1;
7405                         action_flags |= actions->type ==
7406                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7407                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7408                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7409                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7410                         break;
7411                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7412                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7413                         ret = flow_dv_validate_action_modify_tcp_ack
7414                                                                 (action_flags,
7415                                                                  actions,
7416                                                                  item_flags,
7417                                                                  error);
7418                         if (ret < 0)
7419                                 return ret;
7420                         /* Count all modify-header actions as one action. */
7421                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7422                                 ++actions_n;
7423                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7424                                 modify_after_mirror = 1;
7425                         action_flags |= actions->type ==
7426                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7427                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7428                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7429                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7430                         break;
7431                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7432                         break;
7433                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7434                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7435                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7436                         break;
7437                 case RTE_FLOW_ACTION_TYPE_METER:
7438                         ret = mlx5_flow_validate_action_meter(dev,
7439                                                               action_flags,
7440                                                               actions, attr,
7441                                                               &def_policy,
7442                                                               error);
7443                         if (ret < 0)
7444                                 return ret;
7445                         action_flags |= MLX5_FLOW_ACTION_METER;
7446                         if (!def_policy)
7447                                 action_flags |=
7448                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7449                         ++actions_n;
7450                         /* Meter action will add one more TAG action. */
7451                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7452                         break;
7453                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7454                         if (!attr->transfer && !attr->group)
7455                                 return rte_flow_error_set(error, ENOTSUP,
7456                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7457                                                                            NULL,
7458                           "Shared ASO age action is not supported for group 0");
7459                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7460                                 return rte_flow_error_set
7461                                                   (error, EINVAL,
7462                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7463                                                    NULL,
7464                                                    "duplicate age actions set");
7465                         action_flags |= MLX5_FLOW_ACTION_AGE;
7466                         ++actions_n;
7467                         break;
7468                 case RTE_FLOW_ACTION_TYPE_AGE:
7469                         ret = flow_dv_validate_action_age(action_flags,
7470                                                           actions, dev,
7471                                                           error);
7472                         if (ret < 0)
7473                                 return ret;
7474                         /*
7475                          * Validate the regular AGE action (using counter)
7476                          * mutual exclusion with share counter actions.
7477                          */
7478                         if (!priv->sh->flow_hit_aso_en) {
7479                                 if (shared_count)
7480                                         return rte_flow_error_set
7481                                                 (error, EINVAL,
7482                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7483                                                 NULL,
7484                                                 "old age and shared count combination is not supported");
7485                                 if (sample_count)
7486                                         return rte_flow_error_set
7487                                                 (error, EINVAL,
7488                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7489                                                 NULL,
7490                                                 "old age action and count must be in the same sub flow");
7491                         }
7492                         action_flags |= MLX5_FLOW_ACTION_AGE;
7493                         ++actions_n;
7494                         break;
7495                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7496                         ret = flow_dv_validate_action_modify_ipv4_dscp
7497                                                          (action_flags,
7498                                                           actions,
7499                                                           item_flags,
7500                                                           error);
7501                         if (ret < 0)
7502                                 return ret;
7503                         /* Count all modify-header actions as one action. */
7504                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7505                                 ++actions_n;
7506                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7507                                 modify_after_mirror = 1;
7508                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7509                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7510                         break;
7511                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7512                         ret = flow_dv_validate_action_modify_ipv6_dscp
7513                                                                 (action_flags,
7514                                                                  actions,
7515                                                                  item_flags,
7516                                                                  error);
7517                         if (ret < 0)
7518                                 return ret;
7519                         /* Count all modify-header actions as one action. */
7520                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7521                                 ++actions_n;
7522                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7523                                 modify_after_mirror = 1;
7524                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7525                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7526                         break;
7527                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7528                         ret = flow_dv_validate_action_sample(&action_flags,
7529                                                              actions, dev,
7530                                                              attr, item_flags,
7531                                                              rss, &sample_rss,
7532                                                              &sample_count,
7533                                                              &fdb_mirror_limit,
7534                                                              error);
7535                         if (ret < 0)
7536                                 return ret;
7537                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7538                         ++actions_n;
7539                         break;
7540                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7541                         ret = flow_dv_validate_action_modify_field(dev,
7542                                                                    action_flags,
7543                                                                    actions,
7544                                                                    attr,
7545                                                                    error);
7546                         if (ret < 0)
7547                                 return ret;
7548                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7549                                 modify_after_mirror = 1;
7550                         /* Count all modify-header actions as one action. */
7551                         if (!(action_flags & MLX5_FLOW_ACTION_MODIFY_FIELD))
7552                                 ++actions_n;
7553                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7554                         rw_act_num += ret;
7555                         break;
7556                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7557                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7558                                                              item_flags, attr,
7559                                                              error);
7560                         if (ret < 0)
7561                                 return ret;
7562                         action_flags |= MLX5_FLOW_ACTION_CT;
7563                         break;
7564                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7565                         /* tunnel offload action was processed before
7566                          * list it here as a supported type
7567                          */
7568                         break;
7569                 default:
7570                         return rte_flow_error_set(error, ENOTSUP,
7571                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7572                                                   actions,
7573                                                   "action not supported");
7574                 }
7575         }
7576         /*
7577          * Validate actions in flow rules
7578          * - Explicit decap action is prohibited by the tunnel offload API.
7579          * - Drop action in tunnel steer rule is prohibited by the API.
7580          * - Application cannot use MARK action because its value can mask
7581          *   tunnel default miss notification.
7582          * - JUMP in tunnel match rule has no support in current PMD
7583          *   implementation.
7584          * - TAG & META are reserved for future uses.
7585          */
7586         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7587                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7588                                             MLX5_FLOW_ACTION_MARK     |
7589                                             MLX5_FLOW_ACTION_SET_TAG  |
7590                                             MLX5_FLOW_ACTION_SET_META |
7591                                             MLX5_FLOW_ACTION_DROP;
7592
7593                 if (action_flags & bad_actions_mask)
7594                         return rte_flow_error_set
7595                                         (error, EINVAL,
7596                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7597                                         "Invalid RTE action in tunnel "
7598                                         "set decap rule");
7599                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7600                         return rte_flow_error_set
7601                                         (error, EINVAL,
7602                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7603                                         "tunnel set decap rule must terminate "
7604                                         "with JUMP");
7605                 if (!attr->ingress)
7606                         return rte_flow_error_set
7607                                         (error, EINVAL,
7608                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7609                                         "tunnel flows for ingress traffic only");
7610         }
7611         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7612                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7613                                             MLX5_FLOW_ACTION_MARK    |
7614                                             MLX5_FLOW_ACTION_SET_TAG |
7615                                             MLX5_FLOW_ACTION_SET_META;
7616
7617                 if (action_flags & bad_actions_mask)
7618                         return rte_flow_error_set
7619                                         (error, EINVAL,
7620                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7621                                         "Invalid RTE action in tunnel "
7622                                         "set match rule");
7623         }
7624         /*
7625          * Validate the drop action mutual exclusion with other actions.
7626          * Drop action is mutually-exclusive with any other action, except for
7627          * Count action.
7628          * Drop action compatibility with tunnel offload was already validated.
7629          */
7630         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7631                             MLX5_FLOW_ACTION_TUNNEL_MATCH));
7632         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7633             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7634                 return rte_flow_error_set(error, EINVAL,
7635                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7636                                           "Drop action is mutually-exclusive "
7637                                           "with any other action, except for "
7638                                           "Count action");
7639         /* Eswitch has few restrictions on using items and actions */
7640         if (attr->transfer) {
7641                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7642                     action_flags & MLX5_FLOW_ACTION_FLAG)
7643                         return rte_flow_error_set(error, ENOTSUP,
7644                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7645                                                   NULL,
7646                                                   "unsupported action FLAG");
7647                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7648                     action_flags & MLX5_FLOW_ACTION_MARK)
7649                         return rte_flow_error_set(error, ENOTSUP,
7650                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7651                                                   NULL,
7652                                                   "unsupported action MARK");
7653                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7654                         return rte_flow_error_set(error, ENOTSUP,
7655                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7656                                                   NULL,
7657                                                   "unsupported action QUEUE");
7658                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7659                         return rte_flow_error_set(error, ENOTSUP,
7660                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7661                                                   NULL,
7662                                                   "unsupported action RSS");
7663                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7664                         return rte_flow_error_set(error, EINVAL,
7665                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7666                                                   actions,
7667                                                   "no fate action is found");
7668         } else {
7669                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7670                         return rte_flow_error_set(error, EINVAL,
7671                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7672                                                   actions,
7673                                                   "no fate action is found");
7674         }
7675         /*
7676          * Continue validation for Xcap and VLAN actions.
7677          * If hairpin is working in explicit TX rule mode, there is no actions
7678          * splitting and the validation of hairpin ingress flow should be the
7679          * same as other standard flows.
7680          */
7681         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7682                              MLX5_FLOW_VLAN_ACTIONS)) &&
7683             (queue_index == 0xFFFF ||
7684              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7685              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7686              conf->tx_explicit != 0))) {
7687                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7688                     MLX5_FLOW_XCAP_ACTIONS)
7689                         return rte_flow_error_set(error, ENOTSUP,
7690                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7691                                                   NULL, "encap and decap "
7692                                                   "combination aren't supported");
7693                 if (!attr->transfer && attr->ingress) {
7694                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7695                                 return rte_flow_error_set
7696                                                 (error, ENOTSUP,
7697                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7698                                                  NULL, "encap is not supported"
7699                                                  " for ingress traffic");
7700                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7701                                 return rte_flow_error_set
7702                                                 (error, ENOTSUP,
7703                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7704                                                  NULL, "push VLAN action not "
7705                                                  "supported for ingress");
7706                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7707                                         MLX5_FLOW_VLAN_ACTIONS)
7708                                 return rte_flow_error_set
7709                                                 (error, ENOTSUP,
7710                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7711                                                  NULL, "no support for "
7712                                                  "multiple VLAN actions");
7713                 }
7714         }
7715         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7716                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7717                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7718                         attr->ingress)
7719                         return rte_flow_error_set
7720                                 (error, ENOTSUP,
7721                                 RTE_FLOW_ERROR_TYPE_ACTION,
7722                                 NULL, "fate action not supported for "
7723                                 "meter with policy");
7724                 if (attr->egress) {
7725                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7726                                 return rte_flow_error_set
7727                                         (error, ENOTSUP,
7728                                         RTE_FLOW_ERROR_TYPE_ACTION,
7729                                         NULL, "modify header action in egress "
7730                                         "cannot be done before meter action");
7731                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7732                                 return rte_flow_error_set
7733                                         (error, ENOTSUP,
7734                                         RTE_FLOW_ERROR_TYPE_ACTION,
7735                                         NULL, "encap action in egress "
7736                                         "cannot be done before meter action");
7737                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7738                                 return rte_flow_error_set
7739                                         (error, ENOTSUP,
7740                                         RTE_FLOW_ERROR_TYPE_ACTION,
7741                                         NULL, "push vlan action in egress "
7742                                         "cannot be done before meter action");
7743                 }
7744         }
7745         /*
7746          * Hairpin flow will add one more TAG action in TX implicit mode.
7747          * In TX explicit mode, there will be no hairpin flow ID.
7748          */
7749         if (hairpin > 0)
7750                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7751         /* extra metadata enabled: one more TAG action will be add. */
7752         if (dev_conf->dv_flow_en &&
7753             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7754             mlx5_flow_ext_mreg_supported(dev))
7755                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7756         if (rw_act_num >
7757                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7758                 return rte_flow_error_set(error, ENOTSUP,
7759                                           RTE_FLOW_ERROR_TYPE_ACTION,
7760                                           NULL, "too many header modify"
7761                                           " actions to support");
7762         }
7763         /* Eswitch egress mirror and modify flow has limitation on CX5 */
7764         if (fdb_mirror_limit && modify_after_mirror)
7765                 return rte_flow_error_set(error, EINVAL,
7766                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7767                                 "sample before modify action is not supported");
7768         return 0;
7769 }
7770
7771 /**
7772  * Internal preparation function. Allocates the DV flow size,
7773  * this size is constant.
7774  *
7775  * @param[in] dev
7776  *   Pointer to the rte_eth_dev structure.
7777  * @param[in] attr
7778  *   Pointer to the flow attributes.
7779  * @param[in] items
7780  *   Pointer to the list of items.
7781  * @param[in] actions
7782  *   Pointer to the list of actions.
7783  * @param[out] error
7784  *   Pointer to the error structure.
7785  *
7786  * @return
7787  *   Pointer to mlx5_flow object on success,
7788  *   otherwise NULL and rte_errno is set.
7789  */
7790 static struct mlx5_flow *
7791 flow_dv_prepare(struct rte_eth_dev *dev,
7792                 const struct rte_flow_attr *attr __rte_unused,
7793                 const struct rte_flow_item items[] __rte_unused,
7794                 const struct rte_flow_action actions[] __rte_unused,
7795                 struct rte_flow_error *error)
7796 {
7797         uint32_t handle_idx = 0;
7798         struct mlx5_flow *dev_flow;
7799         struct mlx5_flow_handle *dev_handle;
7800         struct mlx5_priv *priv = dev->data->dev_private;
7801         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7802
7803         MLX5_ASSERT(wks);
7804         wks->skip_matcher_reg = 0;
7805         /* In case of corrupting the memory. */
7806         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7807                 rte_flow_error_set(error, ENOSPC,
7808                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7809                                    "not free temporary device flow");
7810                 return NULL;
7811         }
7812         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7813                                    &handle_idx);
7814         if (!dev_handle) {
7815                 rte_flow_error_set(error, ENOMEM,
7816                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7817                                    "not enough memory to create flow handle");
7818                 return NULL;
7819         }
7820         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7821         dev_flow = &wks->flows[wks->flow_idx++];
7822         memset(dev_flow, 0, sizeof(*dev_flow));
7823         dev_flow->handle = dev_handle;
7824         dev_flow->handle_idx = handle_idx;
7825         /*
7826          * In some old rdma-core releases, before continuing, a check of the
7827          * length of matching parameter will be done at first. It needs to use
7828          * the length without misc4 param. If the flow has misc4 support, then
7829          * the length needs to be adjusted accordingly. Each param member is
7830          * aligned with a 64B boundary naturally.
7831          */
7832         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7833                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7834         dev_flow->ingress = attr->ingress;
7835         dev_flow->dv.transfer = attr->transfer;
7836         return dev_flow;
7837 }
7838
7839 #ifdef RTE_LIBRTE_MLX5_DEBUG
7840 /**
7841  * Sanity check for match mask and value. Similar to check_valid_spec() in
7842  * kernel driver. If unmasked bit is present in value, it returns failure.
7843  *
7844  * @param match_mask
7845  *   pointer to match mask buffer.
7846  * @param match_value
7847  *   pointer to match value buffer.
7848  *
7849  * @return
7850  *   0 if valid, -EINVAL otherwise.
7851  */
7852 static int
7853 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7854 {
7855         uint8_t *m = match_mask;
7856         uint8_t *v = match_value;
7857         unsigned int i;
7858
7859         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7860                 if (v[i] & ~m[i]) {
7861                         DRV_LOG(ERR,
7862                                 "match_value differs from match_criteria"
7863                                 " %p[%u] != %p[%u]",
7864                                 match_value, i, match_mask, i);
7865                         return -EINVAL;
7866                 }
7867         }
7868         return 0;
7869 }
7870 #endif
7871
7872 /**
7873  * Add match of ip_version.
7874  *
7875  * @param[in] group
7876  *   Flow group.
7877  * @param[in] headers_v
7878  *   Values header pointer.
7879  * @param[in] headers_m
7880  *   Masks header pointer.
7881  * @param[in] ip_version
7882  *   The IP version to set.
7883  */
7884 static inline void
7885 flow_dv_set_match_ip_version(uint32_t group,
7886                              void *headers_v,
7887                              void *headers_m,
7888                              uint8_t ip_version)
7889 {
7890         if (group == 0)
7891                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7892         else
7893                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7894                          ip_version);
7895         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7896         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7897         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7898 }
7899
7900 /**
7901  * Add Ethernet item to matcher and to the value.
7902  *
7903  * @param[in, out] matcher
7904  *   Flow matcher.
7905  * @param[in, out] key
7906  *   Flow matcher value.
7907  * @param[in] item
7908  *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
7912 static void
7913 flow_dv_translate_item_eth(void *matcher, void *key,
7914                            const struct rte_flow_item *item, int inner,
7915                            uint32_t group)
7916 {
7917         const struct rte_flow_item_eth *eth_m = item->mask;
7918         const struct rte_flow_item_eth *eth_v = item->spec;
7919         const struct rte_flow_item_eth nic_mask = {
7920                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7921                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7922                 .type = RTE_BE16(0xffff),
7923                 .has_vlan = 0,
7924         };
7925         void *hdrs_m;
7926         void *hdrs_v;
7927         char *l24_v;
7928         unsigned int i;
7929
7930         if (!eth_v)
7931                 return;
7932         if (!eth_m)
7933                 eth_m = &nic_mask;
7934         if (inner) {
7935                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7936                                          inner_headers);
7937                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7938         } else {
7939                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7940                                          outer_headers);
7941                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7942         }
7943         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
7944                &eth_m->dst, sizeof(eth_m->dst));
7945         /* The value must be in the range of the mask. */
7946         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
7947         for (i = 0; i < sizeof(eth_m->dst); ++i)
7948                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
7949         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
7950                &eth_m->src, sizeof(eth_m->src));
7951         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
7952         /* The value must be in the range of the mask. */
7953         for (i = 0; i < sizeof(eth_m->dst); ++i)
7954                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
7955         /*
7956          * HW supports match on one Ethertype, the Ethertype following the last
7957          * VLAN tag of the packet (see PRM).
7958          * Set match on ethertype only if ETH header is not followed by VLAN.
7959          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
7960          * ethertype, and use ip_version field instead.
7961          * eCPRI over Ether layer will use type value 0xAEFE.
7962          */
7963         if (eth_m->type == 0xFFFF) {
7964                 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
7965                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7966                 switch (eth_v->type) {
7967                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
7968                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7969                         return;
7970                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
7971                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
7972                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
7973                         return;
7974                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
7975                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
7976                         return;
7977                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
7978                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
7979                         return;
7980                 default:
7981                         break;
7982                 }
7983         }
7984         if (eth_m->has_vlan) {
7985                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
7986                 if (eth_v->has_vlan) {
7987                         /*
7988                          * Here, when also has_more_vlan field in VLAN item is
7989                          * not set, only single-tagged packets will be matched.
7990                          */
7991                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
7992                         return;
7993                 }
7994         }
7995         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
7996                  rte_be_to_cpu_16(eth_m->type));
7997         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
7998         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
7999 }
8000
8001 /**
8002  * Add VLAN item to matcher and to the value.
8003  *
8004  * @param[in, out] dev_flow
8005  *   Flow descriptor.
8006  * @param[in, out] matcher
8007  *   Flow matcher.
8008  * @param[in, out] key
8009  *   Flow matcher value.
8010  * @param[in] item
8011  *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
			    void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	void *hdrs_m;
	void *hdrs_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
		/*
		 * This is workaround, masks are not supported,
		 * and pre-validated.
		 */
		/* Side effect: remember the VID for the VF VLAN workaround. */
		if (vlan_v)
			dev_flow->handle->vf_vlan.tag =
					rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
	}
	/*
	 * When VLAN item exists in flow, mark packet as tagged,
	 * even if TCI is not specified.
	 */
	/* Do not set cvlan_tag if a prior item already set svlan_tag. */
	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
	}
	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &rte_flow_item_vlan_mask;
	/* TCI layout: PRIO(15:13) | CFI(12) | VID(11:0). */
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	/* The value must be in the range of the mask. */
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
	/*
	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
	 * ethertype, and use ip_version field instead.
	 */
	if (vlan_m->inner_type == 0xFFFF) {
		switch (vlan_v->inner_type) {
		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
			/* Another VLAN follows: match as S-tag (QinQ). */
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
			return;
		default:
			break;
		}
	}
	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
		/* Only one vlan_tag bit can be set. */
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
		return;
	}
	/* Fall back to a plain ethertype match on the inner type. */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
8097
8098 /**
8099  * Add IPV4 item to matcher and to the value.
8100  *
8101  * @param[in, out] matcher
8102  *   Flow matcher.
8103  * @param[in, out] key
8104  *   Flow matcher value.
8105  * @param[in] item
8106  *   Flow pattern to translate.
8107  * @param[in] inner
8108  *   Item is inner pattern.
8109  * @param[in] group
8110  *   The group to insert the rule.
8111  */
8112 static void
8113 flow_dv_translate_item_ipv4(void *matcher, void *key,
8114                             const struct rte_flow_item *item,
8115                             int inner, uint32_t group)
8116 {
8117         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8118         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8119         const struct rte_flow_item_ipv4 nic_mask = {
8120                 .hdr = {
8121                         .src_addr = RTE_BE32(0xffffffff),
8122                         .dst_addr = RTE_BE32(0xffffffff),
8123                         .type_of_service = 0xff,
8124                         .next_proto_id = 0xff,
8125                         .time_to_live = 0xff,
8126                 },
8127         };
8128         void *headers_m;
8129         void *headers_v;
8130         char *l24_m;
8131         char *l24_v;
8132         uint8_t tos;
8133
8134         if (inner) {
8135                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8136                                          inner_headers);
8137                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8138         } else {
8139                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8140                                          outer_headers);
8141                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8142         }
8143         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8144         if (!ipv4_v)
8145                 return;
8146         if (!ipv4_m)
8147                 ipv4_m = &nic_mask;
8148         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8149                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8150         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8151                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8152         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8153         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8154         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8155                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8156         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8157                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8158         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8159         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8160         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8161         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8162                  ipv4_m->hdr.type_of_service);
8163         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8164         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8165                  ipv4_m->hdr.type_of_service >> 2);
8166         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8167         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8168                  ipv4_m->hdr.next_proto_id);
8169         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8170                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8171         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8172                  ipv4_m->hdr.time_to_live);
8173         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8174                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8175         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8176                  !!(ipv4_m->hdr.fragment_offset));
8177         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8178                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8179 }
8180
8181 /**
8182  * Add IPV6 item to matcher and to the value.
8183  *
8184  * @param[in, out] matcher
8185  *   Flow matcher.
8186  * @param[in, out] key
8187  *   Flow matcher value.
8188  * @param[in] item
8189  *   Flow pattern to translate.
8190  * @param[in] inner
8191  *   Item is inner pattern.
8192  * @param[in] group
8193  *   The group to insert the rule.
8194  */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask: match every IPv6 field the PRM can express. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	/* Flow label lives in the misc parameters, not the L2-L4 headers. */
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* The IP version is matched even without spec/mask. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	/* Destination address: each value byte is masked into range. */
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	/* vtc_flow = version(31:28) | traffic class(27:20) | label(19:0). */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	/* ECN is the low 2 TC bits (21:20), DSCP the high 6 (27:22). */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
	/* Hop limit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv6_m->hdr.hop_limits);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
	/* Fragment bit is driven by the has_frag_ext item flag. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv6_m->has_frag_ext));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
}
8289
8290 /**
8291  * Add IPV6 fragment extension item to matcher and to the value.
8292  *
8293  * @param[in, out] matcher
8294  *   Flow matcher.
8295  * @param[in, out] key
8296  *   Flow matcher value.
8297  * @param[in] item
8298  *   Flow pattern to translate.
8299  * @param[in] inner
8300  *   Item is inner pattern.
8301  */
8302 static void
8303 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8304                                      const struct rte_flow_item *item,
8305                                      int inner)
8306 {
8307         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8308         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8309         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8310                 .hdr = {
8311                         .next_header = 0xff,
8312                         .frag_data = RTE_BE16(0xffff),
8313                 },
8314         };
8315         void *headers_m;
8316         void *headers_v;
8317
8318         if (inner) {
8319                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8320                                          inner_headers);
8321                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8322         } else {
8323                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8324                                          outer_headers);
8325                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8326         }
8327         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8328         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8329         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8330         if (!ipv6_frag_ext_v)
8331                 return;
8332         if (!ipv6_frag_ext_m)
8333                 ipv6_frag_ext_m = &nic_mask;
8334         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8335                  ipv6_frag_ext_m->hdr.next_header);
8336         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8337                  ipv6_frag_ext_v->hdr.next_header &
8338                  ipv6_frag_ext_m->hdr.next_header);
8339 }
8340
8341 /**
8342  * Add TCP item to matcher and to the value.
8343  *
8344  * @param[in, out] matcher
8345  *   Flow matcher.
8346  * @param[in, out] key
8347  *   Flow matcher value.
8348  * @param[in] item
8349  *   Flow pattern to translate.
8350  * @param[in] inner
8351  *   Item is inner pattern.
8352  */
8353 static void
8354 flow_dv_translate_item_tcp(void *matcher, void *key,
8355                            const struct rte_flow_item *item,
8356                            int inner)
8357 {
8358         const struct rte_flow_item_tcp *tcp_m = item->mask;
8359         const struct rte_flow_item_tcp *tcp_v = item->spec;
8360         void *headers_m;
8361         void *headers_v;
8362
8363         if (inner) {
8364                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8365                                          inner_headers);
8366                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8367         } else {
8368                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8369                                          outer_headers);
8370                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8371         }
8372         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8373         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8374         if (!tcp_v)
8375                 return;
8376         if (!tcp_m)
8377                 tcp_m = &rte_flow_item_tcp_mask;
8378         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8379                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8380         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8381                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8382         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8383                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8384         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8385                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8386         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8387                  tcp_m->hdr.tcp_flags);
8388         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8389                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8390 }
8391
8392 /**
8393  * Add UDP item to matcher and to the value.
8394  *
8395  * @param[in, out] matcher
8396  *   Flow matcher.
8397  * @param[in, out] key
8398  *   Flow matcher value.
8399  * @param[in] item
8400  *   Flow pattern to translate.
8401  * @param[in] inner
8402  *   Item is inner pattern.
8403  */
8404 static void
8405 flow_dv_translate_item_udp(void *matcher, void *key,
8406                            const struct rte_flow_item *item,
8407                            int inner)
8408 {
8409         const struct rte_flow_item_udp *udp_m = item->mask;
8410         const struct rte_flow_item_udp *udp_v = item->spec;
8411         void *headers_m;
8412         void *headers_v;
8413
8414         if (inner) {
8415                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8416                                          inner_headers);
8417                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8418         } else {
8419                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8420                                          outer_headers);
8421                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8422         }
8423         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8424         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8425         if (!udp_v)
8426                 return;
8427         if (!udp_m)
8428                 udp_m = &rte_flow_item_udp_mask;
8429         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8430                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8431         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8432                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8433         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8434                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8435         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8436                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8437 }
8438
8439 /**
8440  * Add GRE optional Key item to matcher and to the value.
8441  *
8442  * @param[in, out] matcher
8443  *   Flow matcher.
8444  * @param[in, out] key
8445  *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
8451 static void
8452 flow_dv_translate_item_gre_key(void *matcher, void *key,
8453                                    const struct rte_flow_item *item)
8454 {
8455         const rte_be32_t *key_m = item->mask;
8456         const rte_be32_t *key_v = item->spec;
8457         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8458         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8459         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8460
8461         /* GRE K bit must be on and should already be validated */
8462         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8463         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8464         if (!key_v)
8465                 return;
8466         if (!key_m)
8467                 key_m = &gre_key_default_mask;
8468         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8469                  rte_be_to_cpu_32(*key_m) >> 8);
8470         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8471                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8472         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8473                  rte_be_to_cpu_32(*key_m) & 0xFF);
8474         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8475                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8476 }
8477
8478 /**
8479  * Add GRE item to matcher and to the value.
8480  *
8481  * @param[in, out] matcher
8482  *   Flow matcher.
8483  * @param[in, out] key
8484  *   Flow matcher value.
8485  * @param[in] item
8486  *   Flow pattern to translate.
8487  * @param[in] inner
8488  *   Item is inner pattern.
8489  */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Overlay of the host-order GRE c_rsvd0_ver word used to extract
	 * the C/K/S present flags and the version field.
	 * NOTE(review): the bitfield positions rely on little-endian bit
	 * allocation (after rte_be_to_cpu_16 the GRE C bit lands in
	 * c_present) — confirm for big-endian targets.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* A GRE item always pins the IP protocol field. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	/* Payload protocol (ethertype), value masked into range. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
	/* Decode the C/K/S present bits and match each one. */
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
}
8553
8554 /**
8555  * Add NVGRE item to matcher and to the value.
8556  *
8557  * @param[in, out] matcher
8558  *   Flow matcher.
8559  * @param[in, out] key
8560  *   Flow matcher value.
8561  * @param[in] item
8562  *   Flow pattern to translate.
8563  * @param[in] inner
8564  *   Item is inner pattern.
8565  */
8566 static void
8567 flow_dv_translate_item_nvgre(void *matcher, void *key,
8568                              const struct rte_flow_item *item,
8569                              int inner)
8570 {
8571         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8572         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8573         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8574         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8575         const char *tni_flow_id_m;
8576         const char *tni_flow_id_v;
8577         char *gre_key_m;
8578         char *gre_key_v;
8579         int size;
8580         int i;
8581
8582         /* For NVGRE, GRE header fields must be set with defined values. */
8583         const struct rte_flow_item_gre gre_spec = {
8584                 .c_rsvd0_ver = RTE_BE16(0x2000),
8585                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8586         };
8587         const struct rte_flow_item_gre gre_mask = {
8588                 .c_rsvd0_ver = RTE_BE16(0xB000),
8589                 .protocol = RTE_BE16(UINT16_MAX),
8590         };
8591         const struct rte_flow_item gre_item = {
8592                 .spec = &gre_spec,
8593                 .mask = &gre_mask,
8594                 .last = NULL,
8595         };
8596         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8597         if (!nvgre_v)
8598                 return;
8599         if (!nvgre_m)
8600                 nvgre_m = &rte_flow_item_nvgre_mask;
8601         tni_flow_id_m = (const char *)nvgre_m->tni;
8602         tni_flow_id_v = (const char *)nvgre_v->tni;
8603         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8604         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8605         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8606         memcpy(gre_key_m, tni_flow_id_m, size);
8607         for (i = 0; i < size; ++i)
8608                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8609 }
8610
8611 /**
8612  * Add VXLAN item to matcher and to the value.
8613  *
8614  * @param[in, out] matcher
8615  *   Flow matcher.
8616  * @param[in, out] key
8617  *   Flow matcher value.
8618  * @param[in] item
8619  *   Flow pattern to translate.
8620  * @param[in] inner
8621  *   Item is inner pattern.
8622  */
8623 static void
8624 flow_dv_translate_item_vxlan(void *matcher, void *key,
8625                              const struct rte_flow_item *item,
8626                              int inner)
8627 {
8628         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8629         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8630         void *headers_m;
8631         void *headers_v;
8632         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8633         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8634         char *vni_m;
8635         char *vni_v;
8636         uint16_t dport;
8637         int size;
8638         int i;
8639
8640         if (inner) {
8641                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8642                                          inner_headers);
8643                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8644         } else {
8645                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8646                                          outer_headers);
8647                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8648         }
8649         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8650                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8651         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8652                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8653                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8654         }
8655         if (!vxlan_v)
8656                 return;
8657         if (!vxlan_m)
8658                 vxlan_m = &rte_flow_item_vxlan_mask;
8659         size = sizeof(vxlan_m->vni);
8660         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8661         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8662         memcpy(vni_m, vxlan_m->vni, size);
8663         for (i = 0; i < size; ++i)
8664                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8665 }
8666
8667 /**
8668  * Add VXLAN-GPE item to matcher and to the value.
8669  *
8670  * @param[in, out] matcher
8671  *   Flow matcher.
8672  * @param[in, out] key
8673  *   Flow matcher value.
8674  * @param[in] item
8675  *   Flow pattern to translate.
8676  * @param[in] inner
8677  *   Item is inner pattern.
8678  */
8679
8680 static void
8681 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8682                                  const struct rte_flow_item *item, int inner)
8683 {
8684         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8685         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8686         void *headers_m;
8687         void *headers_v;
8688         void *misc_m =
8689                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8690         void *misc_v =
8691                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8692         char *vni_m;
8693         char *vni_v;
8694         uint16_t dport;
8695         int size;
8696         int i;
8697         uint8_t flags_m = 0xff;
8698         uint8_t flags_v = 0xc;
8699
8700         if (inner) {
8701                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8702                                          inner_headers);
8703                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8704         } else {
8705                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8706                                          outer_headers);
8707                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8708         }
8709         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8710                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8711         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8712                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8713                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8714         }
8715         if (!vxlan_v)
8716                 return;
8717         if (!vxlan_m)
8718                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8719         size = sizeof(vxlan_m->vni);
8720         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8721         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8722         memcpy(vni_m, vxlan_m->vni, size);
8723         for (i = 0; i < size; ++i)
8724                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8725         if (vxlan_m->flags) {
8726                 flags_m = vxlan_m->flags;
8727                 flags_v = vxlan_v->flags;
8728         }
8729         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8730         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8731         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8732                  vxlan_m->protocol);
8733         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8734                  vxlan_v->protocol);
8735 }
8736
/**
 * Add Geneve item to matcher and to the value.
 *
 * Matches VNI, protocol type, OAM flag and option length, and implicitly
 * matches the GENEVE UDP destination port when not already requested.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */

static void
flow_dv_translate_item_geneve(void *matcher, void *key,
			      const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_geneve *geneve_m = item->mask;
	const struct rte_flow_item_geneve *geneve_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	uint16_t dport;
	uint16_t gbhdr_m;
	uint16_t gbhdr_v;
	char *vni_m;
	char *vni_v;
	size_t size, i;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	dport = MLX5_UDP_PORT_GENEVE;
	/* Set the implicit UDP destination port match if not set yet. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!geneve_v)
		return;
	if (!geneve_m)
		geneve_m = &rte_flow_item_geneve_mask;
	/* VNI value is matched byte by byte under the mask. */
	size = sizeof(geneve_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
	memcpy(vni_m, geneve_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & geneve_v->vni[i];
	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
		 rte_be_to_cpu_16(geneve_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
	/* OAM flag and option length are packed in ver_opt_len_o_c_rsvd0. */
	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
}
8807
8808 /**
8809  * Create Geneve TLV option resource.
8810  *
8811  * @param dev[in, out]
8812  *   Pointer to rte_eth_dev structure.
8813  * @param[in, out] tag_be24
8814  *   Tag value in big endian then R-shift 8.
8815  * @parm[in, out] dev_flow
8816  *   Pointer to the dev_flow.
8817  * @param[out] error
8818  *   pointer to error structure.
8819  *
8820  * @return
8821  *   0 on success otherwise -errno and errno is set.
8822  */
8823
8824 int
8825 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8826                                              const struct rte_flow_item *item,
8827                                              struct rte_flow_error *error)
8828 {
8829         struct mlx5_priv *priv = dev->data->dev_private;
8830         struct mlx5_dev_ctx_shared *sh = priv->sh;
8831         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8832                         sh->geneve_tlv_option_resource;
8833         struct mlx5_devx_obj *obj;
8834         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8835         int ret = 0;
8836
8837         if (!geneve_opt_v)
8838                 return -1;
8839         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8840         if (geneve_opt_resource != NULL) {
8841                 if (geneve_opt_resource->option_class ==
8842                         geneve_opt_v->option_class &&
8843                         geneve_opt_resource->option_type ==
8844                         geneve_opt_v->option_type &&
8845                         geneve_opt_resource->length ==
8846                         geneve_opt_v->option_len) {
8847                         /* We already have GENVE TLV option obj allocated. */
8848                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8849                                            __ATOMIC_RELAXED);
8850                 } else {
8851                         ret = rte_flow_error_set(error, ENOMEM,
8852                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8853                                 "Only one GENEVE TLV option supported");
8854                         goto exit;
8855                 }
8856         } else {
8857                 /* Create a GENEVE TLV object and resource. */
8858                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8859                                 geneve_opt_v->option_class,
8860                                 geneve_opt_v->option_type,
8861                                 geneve_opt_v->option_len);
8862                 if (!obj) {
8863                         ret = rte_flow_error_set(error, ENODATA,
8864                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8865                                 "Failed to create GENEVE TLV Devx object");
8866                         goto exit;
8867                 }
8868                 sh->geneve_tlv_option_resource =
8869                                 mlx5_malloc(MLX5_MEM_ZERO,
8870                                                 sizeof(*geneve_opt_resource),
8871                                                 0, SOCKET_ID_ANY);
8872                 if (!sh->geneve_tlv_option_resource) {
8873                         claim_zero(mlx5_devx_cmd_destroy(obj));
8874                         ret = rte_flow_error_set(error, ENOMEM,
8875                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8876                                 "GENEVE TLV object memory allocation failed");
8877                         goto exit;
8878                 }
8879                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8880                 geneve_opt_resource->obj = obj;
8881                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8882                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8883                 geneve_opt_resource->length = geneve_opt_v->option_len;
8884                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8885                                 __ATOMIC_RELAXED);
8886         }
8887 exit:
8888         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8889         return ret;
8890 }
8891
8892 /**
8893  * Add Geneve TLV option item to matcher.
8894  *
8895  * @param[in, out] dev
8896  *   Pointer to rte_eth_dev structure.
8897  * @param[in, out] matcher
8898  *   Flow matcher.
8899  * @param[in, out] key
8900  *   Flow matcher value.
8901  * @param[in] item
8902  *   Flow pattern to translate.
8903  * @param[out] error
8904  *   Pointer to error structure.
8905  */
8906 static int
8907 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8908                                   void *key, const struct rte_flow_item *item,
8909                                   struct rte_flow_error *error)
8910 {
8911         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8912         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8913         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8914         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8915         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8916                         misc_parameters_3);
8917         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8918         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8919         int ret = 0;
8920
8921         if (!geneve_opt_v)
8922                 return -1;
8923         if (!geneve_opt_m)
8924                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8925         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8926                                                            error);
8927         if (ret) {
8928                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8929                 return ret;
8930         }
8931         /*
8932          * Set the option length in GENEVE header if not requested.
8933          * The GENEVE TLV option length is expressed by the option length field
8934          * in the GENEVE header.
8935          * If the option length was not requested but the GENEVE TLV option item
8936          * is present we set the option length field implicitly.
8937          */
8938         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
8939                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8940                          MLX5_GENEVE_OPTLEN_MASK);
8941                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8942                          geneve_opt_v->option_len + 1);
8943         }
8944         /* Set the data. */
8945         if (geneve_opt_v->data) {
8946                 memcpy(&opt_data_key, geneve_opt_v->data,
8947                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8948                                 sizeof(opt_data_key)));
8949                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8950                                 sizeof(opt_data_key));
8951                 memcpy(&opt_data_mask, geneve_opt_m->data,
8952                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
8953                                 sizeof(opt_data_mask)));
8954                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
8955                                 sizeof(opt_data_mask));
8956                 MLX5_SET(fte_match_set_misc3, misc3_m,
8957                                 geneve_tlv_option_0_data,
8958                                 rte_be_to_cpu_32(opt_data_mask));
8959                 MLX5_SET(fte_match_set_misc3, misc3_v,
8960                                 geneve_tlv_option_0_data,
8961                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
8962         }
8963         return ret;
8964 }
8965
/**
 * Add MPLS item to matcher and to the value.
 *
 * The previous protocol layer decides both the implicit match added for
 * the encapsulating header and which misc2 MPLS field is used.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] prev_layer
 *   The protocol layer indicated in previous item.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_mpls(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    uint64_t prev_layer,
			    int inner)
{
	const uint32_t *in_mpls_m = item->mask;
	const uint32_t *in_mpls_v = item->spec;
	uint32_t *out_mpls_m = 0;
	uint32_t *out_mpls_v = 0;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_2);
	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

	/* Implicitly match the carrier protocol of the MPLS payload. */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 MLX5_UDP_PORT_MPLS);
		break;
	case MLX5_FLOW_LAYER_GRE:
		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
			 RTE_ETHER_TYPE_MPLS);
		break;
	default:
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_MPLS);
		break;
	}
	if (!in_mpls_v)
		return;
	if (!in_mpls_m)
		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
	/* Pick the misc2 MPLS field matching the encapsulation. */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_udp);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_udp);
		break;
	case MLX5_FLOW_LAYER_GRE:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_gre);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_gre);
		break;
	default:
		/* Inner MPLS not over GRE is not supported. */
		if (!inner) {
			out_mpls_m =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_m,
							 outer_first_mpls);
			out_mpls_v =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_v,
							 outer_first_mpls);
		}
		break;
	}
	/* Copy the 32-bit MPLS label stack entry, value under mask. */
	if (out_mpls_m && out_mpls_v) {
		*out_mpls_m = *in_mpls_m;
		*out_mpls_v = *in_mpls_v & *in_mpls_m;
	}
}
9055
/**
 * Add metadata register item to matcher
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] reg_type
 *   Type of device metadata register
 * @param[in] data
 *   Register value
 * @param[in] mask
 *   Register mask
 */
static void
flow_dv_match_meta_reg(void *matcher, void *key,
		       enum modify_reg reg_type,
		       uint32_t data, uint32_t mask)
{
	void *misc2_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
	void *misc2_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	uint32_t temp;

	data &= mask;
	switch (reg_type) {
	case REG_A:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
		break;
	case REG_B:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
		break;
	case REG_C_0:
		/*
		 * The metadata register C0 field might be divided into
		 * source vport index and META item value, we should set
		 * this field according to specified mask, not as whole one.
		 */
		temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
		temp |= mask;
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
		temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
		temp &= ~mask;
		temp |= data;
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
		break;
	case REG_C_1:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
		break;
	case REG_C_2:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
		break;
	case REG_C_3:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
		break;
	case REG_C_4:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
		break;
	case REG_C_5:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
		break;
	case REG_C_6:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
		break;
	case REG_C_7:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
		break;
	default:
		/* Unknown register - should never be requested by callers. */
		MLX5_ASSERT(false);
		break;
	}
}
9138
9139 /**
9140  * Add MARK item to matcher
9141  *
9142  * @param[in] dev
9143  *   The device to configure through.
9144  * @param[in, out] matcher
9145  *   Flow matcher.
9146  * @param[in, out] key
9147  *   Flow matcher value.
9148  * @param[in] item
9149  *   Flow pattern to translate.
9150  */
9151 static void
9152 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9153                             void *matcher, void *key,
9154                             const struct rte_flow_item *item)
9155 {
9156         struct mlx5_priv *priv = dev->data->dev_private;
9157         const struct rte_flow_item_mark *mark;
9158         uint32_t value;
9159         uint32_t mask;
9160
9161         mark = item->mask ? (const void *)item->mask :
9162                             &rte_flow_item_mark_mask;
9163         mask = mark->id & priv->sh->dv_mark_mask;
9164         mark = (const void *)item->spec;
9165         MLX5_ASSERT(mark);
9166         value = mark->id & priv->sh->dv_mark_mask & mask;
9167         if (mask) {
9168                 enum modify_reg reg;
9169
9170                 /* Get the metadata register index for the mark. */
9171                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9172                 MLX5_ASSERT(reg > 0);
9173                 if (reg == REG_C_0) {
9174                         struct mlx5_priv *priv = dev->data->dev_private;
9175                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9176                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9177
9178                         mask &= msk_c0;
9179                         mask <<= shl_c0;
9180                         value <<= shl_c0;
9181                 }
9182                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9183         }
9184 }
9185
/**
 * Add META item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_meta(struct rte_eth_dev *dev,
			    void *matcher, void *key,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item *item)
{
	const struct rte_flow_item_meta *meta_m;
	const struct rte_flow_item_meta *meta_v;

	meta_m = (const void *)item->mask;
	if (!meta_m)
		meta_m = &rte_flow_item_meta_mask;
	meta_v = (const void *)item->spec;
	if (meta_v) {
		int reg;
		uint32_t value = meta_v->data;
		uint32_t mask = meta_m->data;

		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
		if (reg < 0)
			return;
		MLX5_ASSERT(reg != REG_NON);
		/*
		 * In datapath code there is no endianness
		 * conversions for performance reasons, all
		 * pattern conversions are done in rte_flow.
		 */
		value = rte_cpu_to_be_32(value);
		mask = rte_cpu_to_be_32(mask);
		if (reg == REG_C_0) {
			struct mlx5_priv *priv = dev->data->dev_private;
			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
			uint32_t shl_c0 = rte_bsf32(msk_c0);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
			/* Drop the high bits not covered by the META mask. */
			uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);

			value >>= shr_c0;
			mask >>= shr_c0;
#endif
			/* Align META bits to the REG_C_0 sub-field in use. */
			value <<= shl_c0;
			mask <<= shl_c0;
			MLX5_ASSERT(msk_c0);
			MLX5_ASSERT(!(~msk_c0 & mask));
		}
		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
	}
}
9247
/**
 * Add vport metadata Reg C0 item to matcher
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] value
 *   Vport metadata register value to match.
 * @param[in] mask
 *   Vport metadata register mask.
 */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
{
	/* Vport metadata is always carried in metadata register C0. */
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}
9264
9265 /**
9266  * Add tag item to matcher
9267  *
9268  * @param[in] dev
9269  *   The devich to configure through.
9270  * @param[in, out] matcher
9271  *   Flow matcher.
9272  * @param[in, out] key
9273  *   Flow matcher value.
9274  * @param[in] item
9275  *   Flow pattern to translate.
9276  */
9277 static void
9278 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9279                                 void *matcher, void *key,
9280                                 const struct rte_flow_item *item)
9281 {
9282         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9283         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9284         uint32_t mask, value;
9285
9286         MLX5_ASSERT(tag_v);
9287         value = tag_v->data;
9288         mask = tag_m ? tag_m->data : UINT32_MAX;
9289         if (tag_v->id == REG_C_0) {
9290                 struct mlx5_priv *priv = dev->data->dev_private;
9291                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9292                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9293
9294                 mask &= msk_c0;
9295                 mask <<= shl_c0;
9296                 value <<= shl_c0;
9297         }
9298         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9299 }
9300
9301 /**
9302  * Add TAG item to matcher
9303  *
9304  * @param[in] dev
9305  *   The devich to configure through.
9306  * @param[in, out] matcher
9307  *   Flow matcher.
9308  * @param[in, out] key
9309  *   Flow matcher value.
9310  * @param[in] item
9311  *   Flow pattern to translate.
9312  */
9313 static void
9314 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9315                            void *matcher, void *key,
9316                            const struct rte_flow_item *item)
9317 {
9318         const struct rte_flow_item_tag *tag_v = item->spec;
9319         const struct rte_flow_item_tag *tag_m = item->mask;
9320         enum modify_reg reg;
9321
9322         MLX5_ASSERT(tag_v);
9323         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9324         /* Get the metadata register index for the tag. */
9325         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9326         MLX5_ASSERT(reg > 0);
9327         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9328 }
9329
9330 /**
9331  * Add source vport match to the specified matcher.
9332  *
9333  * @param[in, out] matcher
9334  *   Flow matcher.
9335  * @param[in, out] key
9336  *   Flow matcher value.
9337  * @param[in] port
9338  *   Source vport value to match
9339  * @param[in] mask
9340  *   Mask
9341  */
9342 static void
9343 flow_dv_translate_item_source_vport(void *matcher, void *key,
9344                                     int16_t port, uint16_t mask)
9345 {
9346         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9347         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9348
9349         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9350         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9351 }
9352
9353 /**
 * Translate port-id item to eswitch match on port-id.
9355  *
9356  * @param[in] dev
 *   The device to configure through.
9358  * @param[in, out] matcher
9359  *   Flow matcher.
9360  * @param[in, out] key
9361  *   Flow matcher value.
9362  * @param[in] item
9363  *   Flow pattern to translate.
 * @param[in] attr
 *   Flow attributes.
9366  *
9367  * @return
9368  *   0 on success, a negative errno value otherwise.
9369  */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item,
			       const struct rte_flow_attr *attr)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;
	uint16_t mask, id;

	/* With no item provided, match the device's own port id exactly. */
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
	/*
	 * Translate to vport field or to metadata, depending on mode.
	 * Kernel can use either misc.source_port or half of C0 metadata
	 * register.
	 */
	if (priv->vport_meta_mask) {
		/*
		 * Provide the hint for SW steering library
		 * to insert the flow into ingress domain and
		 * save the extra vport match.
		 */
		if (mask == 0xffff && priv->vport_id == 0xffff &&
		    priv->pf_bond < 0 && attr->transfer)
			flow_dv_translate_item_source_vport
				(matcher, key, priv->vport_id, mask);
		/*
		 * We should always set the vport metadata register,
		 * otherwise the SW steering library can drop
		 * the rule if wire vport metadata value is not zero,
		 * it depends on kernel configuration.
		 */
		flow_dv_translate_item_meta_vport(matcher, key,
						  priv->vport_meta_tag,
						  priv->vport_meta_mask);
	} else {
		/* Legacy mode: match directly on the source vport number. */
		flow_dv_translate_item_source_vport(matcher, key,
						    priv->vport_id, mask);
	}
	return 0;
}
9415
9416 /**
9417  * Add ICMP6 item to matcher and to the value.
9418  *
9419  * @param[in, out] matcher
9420  *   Flow matcher.
9421  * @param[in, out] key
9422  *   Flow matcher value.
9423  * @param[in] item
9424  *   Flow pattern to translate.
9425  * @param[in] inner
9426  *   Item is inner pattern.
9427  */
9428 static void
9429 flow_dv_translate_item_icmp6(void *matcher, void *key,
9430                               const struct rte_flow_item *item,
9431                               int inner)
9432 {
9433         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9434         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9435         void *headers_m;
9436         void *headers_v;
9437         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9438                                      misc_parameters_3);
9439         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9440         if (inner) {
9441                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9442                                          inner_headers);
9443                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9444         } else {
9445                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9446                                          outer_headers);
9447                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9448         }
9449         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9450         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9451         if (!icmp6_v)
9452                 return;
9453         if (!icmp6_m)
9454                 icmp6_m = &rte_flow_item_icmp6_mask;
9455         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9456         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9457                  icmp6_v->type & icmp6_m->type);
9458         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9459         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9460                  icmp6_v->code & icmp6_m->code);
9461 }
9462
9463 /**
9464  * Add ICMP item to matcher and to the value.
9465  *
9466  * @param[in, out] matcher
9467  *   Flow matcher.
9468  * @param[in, out] key
9469  *   Flow matcher value.
9470  * @param[in] item
9471  *   Flow pattern to translate.
9472  * @param[in] inner
9473  *   Item is inner pattern.
9474  */
static void
flow_dv_translate_item_icmp(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_icmp *icmp_m = item->mask;
	const struct rte_flow_item_icmp *icmp_v = item->spec;
	uint32_t icmp_header_data_m = 0;
	uint32_t icmp_header_data_v = 0;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* ICMP implies an exact match on the IP protocol number. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
	/* Without a spec only the protocol match above is programmed. */
	if (!icmp_v)
		return;
	if (!icmp_m)
		icmp_m = &rte_flow_item_icmp_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
		 icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
		 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
		 icmp_m->hdr.icmp_code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
	/*
	 * icmp_header_data folds ident (upper 16 bits) and sequence number
	 * (lower 16 bits) into one dword, converted from big endian.
	 */
	icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
	icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
	/* Only program the dword when the mask actually selects bits. */
	if (icmp_header_data_m) {
		icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
		icmp_header_data_v |=
			 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
		MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
			 icmp_header_data_m);
		MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
			 icmp_header_data_v & icmp_header_data_m);
	}
}
9524
9525 /**
9526  * Add GTP item to matcher and to the value.
9527  *
9528  * @param[in, out] matcher
9529  *   Flow matcher.
9530  * @param[in, out] key
9531  *   Flow matcher value.
9532  * @param[in] item
9533  *   Flow pattern to translate.
9534  * @param[in] inner
9535  *   Item is inner pattern.
9536  */
static void
flow_dv_translate_item_gtp(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_gtp *gtp_m = item->mask;
	const struct rte_flow_item_gtp *gtp_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	uint16_t dport = RTE_GTPU_UDP_PORT;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Force the GTP-U UDP port only when no UDP port match is set yet. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	/* Without a spec only the UDP port match above is programmed. */
	if (!gtp_v)
		return;
	if (!gtp_m)
		gtp_m = &rte_flow_item_gtp_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
		 gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
		 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
		 gtp_v->msg_type & gtp_m->msg_type);
	/* The item carries the TEID big-endian; convert for the match DW. */
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
		 rte_be_to_cpu_32(gtp_m->teid));
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
		 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
}
9579
9580 /**
9581  * Add GTP PSC item to matcher.
9582  *
9583  * @param[in, out] matcher
9584  *   Flow matcher.
9585  * @param[in, out] key
9586  *   Flow matcher value.
9587  * @param[in] item
9588  *   Flow pattern to translate.
9589  */
static int
flow_dv_translate_item_gtp_psc(void *matcher, void *key,
			       const struct rte_flow_item *item)
{
	const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
			misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	/* Layout of GTP-U DW 2: sequence number, N-PDU number and next
	 * extension header type, overlaid on one 32-bit word.
	 */
	union {
		uint32_t w32;
		struct {
			uint16_t seq_num;
			uint8_t npdu_num;
			uint8_t next_ext_header_type;
		};
	} dw_2;
	uint8_t gtp_flags;

	/* Always set E-flag match on one, regardless of GTP item settings. */
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
	gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
	gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
	/* Set next extension header type: 0x85 = PDU session container. */
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0xff;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	dw_2.seq_num = 0;
	dw_2.npdu_num = 0;
	dw_2.next_ext_header_type = 0x85;
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
		 rte_cpu_to_be_32(dw_2.w32));
	if (gtp_psc_v) {
		/* Layout of the first extension header dword. */
		union {
			uint32_t w32;
			struct {
				uint8_t len;
				uint8_t type_flags;
				uint8_t qfi;
				uint8_t reserved;
			};
		} dw_0;

		/* Set extension header PDU type and QFI. */
		if (!gtp_psc_m)
			gtp_psc_m = &rte_flow_item_gtp_psc_mask;
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
		dw_0.qfi = gtp_psc_m->qfi;
		MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
		dw_0.w32 = 0;
		dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
							gtp_psc_m->pdu_type);
		dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
		MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
			 rte_cpu_to_be_32(dw_0.w32));
	}
	return 0;
}
9655
9656 /**
9657  * Add eCPRI item to matcher and to the value.
9658  *
9659  * @param[in] dev
 *   The device to configure through.
9661  * @param[in, out] matcher
9662  *   Flow matcher.
9663  * @param[in, out] key
9664  *   Flow matcher value.
9665  * @param[in] item
9666  *   Flow pattern to translate.
 * The sample IDs used in the matching are taken from the device's
 * eCPRI flex parser profile.
9669  */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
			     void *key, const struct rte_flow_item *item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
	struct rte_ecpri_common_hdr common;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	uint32_t *samples;
	void *dw_m;
	void *dw_v;

	/* Nothing to program without a spec. */
	if (!ecpri_v)
		return;
	if (!ecpri_m)
		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * Maximal four DW samples are supported in a single matching now.
	 * Two are used now for a eCPRI matching:
	 * 1. Type: one byte, mask should be 0x00ff0000 in network order
	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
	 *    if any.
	 */
	if (!ecpri_m->hdr.common.u32)
		return;
	/* Sample IDs come from the eCPRI flex parser profile. */
	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
	/* Need to take the whole DW as the mask to fill the entry. */
	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
			    prog_sample_field_value_0);
	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
			    prog_sample_field_value_0);
	/* Already big endian (network order) in the header. */
	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
	/* Sample#0, used for matching type, offset 0. */
	MLX5_SET(fte_match_set_misc4, misc4_m,
		 prog_sample_field_id_0, samples[0]);
	/* It makes no sense to set the sample ID in the mask field. */
	MLX5_SET(fte_match_set_misc4, misc4_v,
		 prog_sample_field_id_0, samples[0]);
	/*
	 * Checking if message body part needs to be matched.
	 * Some wildcard rules only matching type field should be supported.
	 */
	if (ecpri_m->hdr.dummy[0]) {
		common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
		switch (common.type) {
		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
					    prog_sample_field_value_1);
			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
					    prog_sample_field_value_1);
			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
					    ecpri_m->hdr.dummy[0];
			/* Sample#1, to match message body, offset 4. */
			MLX5_SET(fte_match_set_misc4, misc4_m,
				 prog_sample_field_id_1, samples[1]);
			MLX5_SET(fte_match_set_misc4, misc4_v,
				 prog_sample_field_id_1, samples[1]);
			break;
		default:
			/* Others, do not match any sample ID. */
			break;
		}
	}
}
9742
9743 /*
9744  * Add connection tracking status item to matcher
9745  *
9746  * @param[in] dev
 *   The device to configure through.
9748  * @param[in, out] matcher
9749  *   Flow matcher.
9750  * @param[in, out] key
9751  *   Flow matcher value.
9752  * @param[in] item
9753  *   Flow pattern to translate.
9754  */
static void
flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
			      void *matcher, void *key,
			      const struct rte_flow_item *item)
{
	uint32_t reg_value = 0;
	int reg_id;
	/* 8LSB 0b 11/0000/11, middle 4 bits are reserved. */
	uint32_t reg_mask = 0;
	const struct rte_flow_item_conntrack *spec = item->spec;
	const struct rte_flow_item_conntrack *mask = item->mask;
	uint32_t flags;
	struct rte_flow_error error;

	if (!mask)
		mask = &rte_flow_item_conntrack_mask;
	/* Nothing to match when no spec is given or the mask is all-zero. */
	if (!spec || !mask->flags)
		return;
	flags = spec->flags & mask->flags;
	/* The conflict should be checked in the validation. */
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
		reg_value |= MLX5_CT_SYNDROME_VALID;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
		reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
		reg_value |= MLX5_CT_SYNDROME_INVALID;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
		reg_value |= MLX5_CT_SYNDROME_TRAP;
	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
		reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
	/* VALID/INVALID/DISABLED are all encoded in the two top bits (0xc0). */
	if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
			   RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
			   RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
		reg_mask |= 0xc0;
	if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
		reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
	if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
		reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
	/* The REG_C_x value could be saved during startup. */
	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
	/* Silently skip matching when no register is available. */
	if (reg_id == REG_NON)
		return;
	flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
			       reg_value, reg_mask);
}
9800
9801 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9802
9803 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9804         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9805                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9806
9807 /**
9808  * Calculate flow matcher enable bitmap.
9809  *
9810  * @param match_criteria
9811  *   Pointer to flow matcher criteria.
9812  *
9813  * @return
9814  *   Bitmap of enabled fields.
9815  */
9816 static uint8_t
9817 flow_dv_matcher_enable(uint32_t *match_criteria)
9818 {
9819         uint8_t match_criteria_enable;
9820
9821         match_criteria_enable =
9822                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9823                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9824         match_criteria_enable |=
9825                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9826                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9827         match_criteria_enable |=
9828                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9829                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9830         match_criteria_enable |=
9831                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9832                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9833         match_criteria_enable |=
9834                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9835                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9836         match_criteria_enable |=
9837                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9838                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9839         return match_criteria_enable;
9840 }
9841
struct mlx5_hlist_entry *
flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
	struct rte_flow_error *error = ctx->error;
	union mlx5_flow_tbl_key key = { .v64 = key64 };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = !!tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl_data->is_egress = !!key.is_egress;
	tbl_data->is_transfer = !!key.is_fdb;
	tbl_data->dummy = !!key.dummy;
	tbl_data->level = key.level;
	tbl_data->id = key.id;
	tbl = &tbl_data->tbl;
	/* A dummy entry carries metadata only; no DV objects are created. */
	if (key.dummy)
		return &tbl_data->entry;
	/* Select the steering domain by table type and direction. */
	if (key.is_fdb)
		domain = sh->fdb_domain;
	else if (key.is_egress)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* A jump action is created only for non-root (level != 0) tables. */
	if (key.level != 0) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
	      key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
	      key.level, key.id);
	/* Initialize the per-table cache of flow matchers. */
	mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
			     flow_dv_matcher_create_cb,
			     flow_dv_matcher_match_cb,
			     flow_dv_matcher_remove_cb);
	return &tbl_data->entry;
}
9914
9915 int
9916 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9917                      struct mlx5_hlist_entry *entry, uint64_t key64,
9918                      void *cb_ctx __rte_unused)
9919 {
9920         struct mlx5_flow_tbl_data_entry *tbl_data =
9921                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9922         union mlx5_flow_tbl_key key = { .v64 = key64 };
9923
9924         return tbl_data->level != key.level ||
9925                tbl_data->id != key.id ||
9926                tbl_data->dummy != key.dummy ||
9927                tbl_data->is_transfer != !!key.is_fdb ||
9928                tbl_data->is_egress != !!key.is_egress;
9929 }
9930
9931 /**
9932  * Get a flow table.
9933  *
9934  * @param[in, out] dev
9935  *   Pointer to rte_eth_dev structure.
9936  * @param[in] table_level
9937  *   Table level to use.
9938  * @param[in] egress
9939  *   Direction of the table.
9940  * @param[in] transfer
9941  *   E-Switch or NIC flow.
9942  * @param[in] dummy
9943  *   Dummy entry for dv API.
9944  * @param[in] table_id
9945  *   Table id to use.
9946  * @param[out] error
9947  *   pointer to error structure.
9948  *
9949  * @return
9950  *   Returns tables resource based on the index, NULL in case of failed.
9951  */
struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_level, uint8_t egress,
			 uint8_t transfer,
			 bool external,
			 const struct mlx5_flow_tunnel *tunnel,
			 uint32_t group_id, uint8_t dummy,
			 uint32_t table_id,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Pack all lookup parameters into the 64-bit hash-list key. */
	union mlx5_flow_tbl_key table_key = {
		{
			.level = table_level,
			.id = table_id,
			.reserved = 0,
			.dummy = !!dummy,
			.is_fdb = !!transfer,
			.is_egress = !!egress,
		}
	};
	/* Tunnel parameters are consumed by flow_dv_tbl_create_cb(). */
	struct mlx5_flow_tbl_tunnel_prm tt_prm = {
		.tunnel = tunnel,
		.group_id = group_id,
		.external = external,
	};
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &tt_prm,
	};
	struct mlx5_hlist_entry *entry;
	struct mlx5_flow_tbl_data_entry *tbl_data;

	/* Register returns the existing entry or creates a new one. */
	entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot get table");
		return NULL;
	}
	DRV_LOG(DEBUG, "table_level %u table_id %u "
		"tunnel %u group %u registered.",
		table_level, table_id,
		tunnel ? tunnel->tunnel_id : 0, group_id);
	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
	return &tbl_data->tbl;
}
10000
void
flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
		      struct mlx5_hlist_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	/* Release the DV objects created by flow_dv_tbl_create_cb(). */
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	/* Drop the tunnel-offload group mapping for external tables. */
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_hlist_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		uint32_t table_level = tbl_data->level;

		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
		if (he)
			mlx5_hlist_unregister(tunnel_grp_hash, he);
		DRV_LOG(DEBUG,
			"table_level %u id %u tunnel %u group %u released.",
			table_level,
			tbl_data->id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	mlx5_cache_list_destroy(&tbl_data->matchers);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
10042
10043 /**
10044  * Release a flow table.
10045  *
10046  * @param[in] sh
10047  *   Pointer to device shared structure.
10048  * @param[in] tbl
10049  *   Table resource to be released.
10050  *
10051  * @return
10052  *   Returns 0 if table was released, else return 1;
10053  */
10054 static int
10055 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10056                              struct mlx5_flow_tbl_resource *tbl)
10057 {
10058         struct mlx5_flow_tbl_data_entry *tbl_data =
10059                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10060
10061         if (!tbl)
10062                 return 0;
10063         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10064 }
10065
10066 int
10067 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
10068                          struct mlx5_cache_entry *entry, void *cb_ctx)
10069 {
10070         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10071         struct mlx5_flow_dv_matcher *ref = ctx->data;
10072         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10073                                                         entry);
10074
10075         return cur->crc != ref->crc ||
10076                cur->priority != ref->priority ||
10077                memcmp((const void *)cur->mask.buf,
10078                       (const void *)ref->mask.buf, ref->mask.size);
10079 }
10080
10081 struct mlx5_cache_entry *
10082 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
10083                           struct mlx5_cache_entry *entry __rte_unused,
10084                           void *cb_ctx)
10085 {
10086         struct mlx5_dev_ctx_shared *sh = list->ctx;
10087         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10088         struct mlx5_flow_dv_matcher *ref = ctx->data;
10089         struct mlx5_flow_dv_matcher *cache;
10090         struct mlx5dv_flow_matcher_attr dv_attr = {
10091                 .type = IBV_FLOW_ATTR_NORMAL,
10092                 .match_mask = (void *)&ref->mask,
10093         };
10094         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10095                                                             typeof(*tbl), tbl);
10096         int ret;
10097
10098         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
10099         if (!cache) {
10100                 rte_flow_error_set(ctx->error, ENOMEM,
10101                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10102                                    "cannot create matcher");
10103                 return NULL;
10104         }
10105         *cache = *ref;
10106         dv_attr.match_criteria_enable =
10107                 flow_dv_matcher_enable(cache->mask.buf);
10108         dv_attr.priority = ref->priority;
10109         if (tbl->is_egress)
10110                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10111         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10112                                                &cache->matcher_object);
10113         if (ret) {
10114                 mlx5_free(cache);
10115                 rte_flow_error_set(ctx->error, ENOMEM,
10116                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10117                                    "cannot create matcher");
10118                 return NULL;
10119         }
10120         return &cache->entry;
10121 }
10122
10123 /**
10124  * Register the flow matcher.
10125  *
10126  * @param[in, out] dev
10127  *   Pointer to rte_eth_dev structure.
10128  * @param[in, out] matcher
10129  *   Pointer to flow matcher.
10130  * @param[in, out] key
10131  *   Pointer to flow table key.
10132  * @parm[in, out] dev_flow
10133  *   Pointer to the dev_flow.
10134  * @param[out] error
10135  *   pointer to error structure.
10136  *
10137  * @return
10138  *   0 on success otherwise -errno and errno is set.
10139  */
10140 static int
10141 flow_dv_matcher_register(struct rte_eth_dev *dev,
10142                          struct mlx5_flow_dv_matcher *ref,
10143                          union mlx5_flow_tbl_key *key,
10144                          struct mlx5_flow *dev_flow,
10145                          const struct mlx5_flow_tunnel *tunnel,
10146                          uint32_t group_id,
10147                          struct rte_flow_error *error)
10148 {
10149         struct mlx5_cache_entry *entry;
10150         struct mlx5_flow_dv_matcher *cache;
10151         struct mlx5_flow_tbl_resource *tbl;
10152         struct mlx5_flow_tbl_data_entry *tbl_data;
10153         struct mlx5_flow_cb_ctx ctx = {
10154                 .error = error,
10155                 .data = ref,
10156         };
10157
10158         /**
10159          * tunnel offload API requires this registration for cases when
10160          * tunnel match rule was inserted before tunnel set rule.
10161          */
10162         tbl = flow_dv_tbl_resource_get(dev, key->level,
10163                                        key->is_egress, key->is_fdb,
10164                                        dev_flow->external, tunnel,
10165                                        group_id, 0, key->id, error);
10166         if (!tbl)
10167                 return -rte_errno;      /* No need to refill the error info */
10168         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10169         ref->tbl = tbl;
10170         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
10171         if (!entry) {
10172                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10173                 return rte_flow_error_set(error, ENOMEM,
10174                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10175                                           "cannot allocate ref memory");
10176         }
10177         cache = container_of(entry, typeof(*cache), entry);
10178         dev_flow->handle->dvh.matcher = cache;
10179         return 0;
10180 }
10181
10182 struct mlx5_hlist_entry *
10183 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
10184 {
10185         struct mlx5_dev_ctx_shared *sh = list->ctx;
10186         struct rte_flow_error *error = ctx;
10187         struct mlx5_flow_dv_tag_resource *entry;
10188         uint32_t idx = 0;
10189         int ret;
10190
10191         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10192         if (!entry) {
10193                 rte_flow_error_set(error, ENOMEM,
10194                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10195                                    "cannot allocate resource memory");
10196                 return NULL;
10197         }
10198         entry->idx = idx;
10199         entry->tag_id = key;
10200         ret = mlx5_flow_os_create_flow_action_tag(key,
10201                                                   &entry->action);
10202         if (ret) {
10203                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10204                 rte_flow_error_set(error, ENOMEM,
10205                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10206                                    NULL, "cannot create action");
10207                 return NULL;
10208         }
10209         return &entry->entry;
10210 }
10211
10212 int
10213 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
10214                      struct mlx5_hlist_entry *entry, uint64_t key,
10215                      void *cb_ctx __rte_unused)
10216 {
10217         struct mlx5_flow_dv_tag_resource *tag =
10218                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10219
10220         return key != tag->tag_id;
10221 }
10222
10223 /**
10224  * Find existing tag resource or create and register a new one.
10225  *
10226  * @param dev[in, out]
10227  *   Pointer to rte_eth_dev structure.
10228  * @param[in, out] tag_be24
10229  *   Tag value in big endian then R-shift 8.
10230  * @parm[in, out] dev_flow
10231  *   Pointer to the dev_flow.
10232  * @param[out] error
10233  *   pointer to error structure.
10234  *
10235  * @return
10236  *   0 on success otherwise -errno and errno is set.
10237  */
10238 static int
10239 flow_dv_tag_resource_register
10240                         (struct rte_eth_dev *dev,
10241                          uint32_t tag_be24,
10242                          struct mlx5_flow *dev_flow,
10243                          struct rte_flow_error *error)
10244 {
10245         struct mlx5_priv *priv = dev->data->dev_private;
10246         struct mlx5_flow_dv_tag_resource *cache_resource;
10247         struct mlx5_hlist_entry *entry;
10248
10249         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
10250         if (entry) {
10251                 cache_resource = container_of
10252                         (entry, struct mlx5_flow_dv_tag_resource, entry);
10253                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
10254                 dev_flow->dv.tag_resource = cache_resource;
10255                 return 0;
10256         }
10257         return -rte_errno;
10258 }
10259
10260 void
10261 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
10262                       struct mlx5_hlist_entry *entry)
10263 {
10264         struct mlx5_dev_ctx_shared *sh = list->ctx;
10265         struct mlx5_flow_dv_tag_resource *tag =
10266                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10267
10268         MLX5_ASSERT(tag && sh && tag->action);
10269         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10270         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10271         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10272 }
10273
10274 /**
10275  * Release the tag.
10276  *
10277  * @param dev
10278  *   Pointer to Ethernet device.
10279  * @param tag_idx
10280  *   Tag index.
10281  *
10282  * @return
10283  *   1 while a reference on it exists, 0 when freed.
10284  */
10285 static int
10286 flow_dv_tag_release(struct rte_eth_dev *dev,
10287                     uint32_t tag_idx)
10288 {
10289         struct mlx5_priv *priv = dev->data->dev_private;
10290         struct mlx5_flow_dv_tag_resource *tag;
10291
10292         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10293         if (!tag)
10294                 return 0;
10295         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10296                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10297         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10298 }
10299
10300 /**
10301  * Translate port ID action to vport.
10302  *
10303  * @param[in] dev
10304  *   Pointer to rte_eth_dev structure.
10305  * @param[in] action
10306  *   Pointer to the port ID action.
10307  * @param[out] dst_port_id
10308  *   The target port ID.
10309  * @param[out] error
10310  *   Pointer to the error structure.
10311  *
10312  * @return
10313  *   0 on success, a negative errno value otherwise and rte_errno is set.
10314  */
10315 static int
10316 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10317                                  const struct rte_flow_action *action,
10318                                  uint32_t *dst_port_id,
10319                                  struct rte_flow_error *error)
10320 {
10321         uint32_t port;
10322         struct mlx5_priv *priv;
10323         const struct rte_flow_action_port_id *conf =
10324                         (const struct rte_flow_action_port_id *)action->conf;
10325
10326         port = conf->original ? dev->data->port_id : conf->id;
10327         priv = mlx5_port_to_eswitch_info(port, false);
10328         if (!priv)
10329                 return rte_flow_error_set(error, -rte_errno,
10330                                           RTE_FLOW_ERROR_TYPE_ACTION,
10331                                           NULL,
10332                                           "No eswitch info was found for port");
10333 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
10334         /*
10335          * This parameter is transferred to
10336          * mlx5dv_dr_action_create_dest_ib_port().
10337          */
10338         *dst_port_id = priv->dev_port;
10339 #else
10340         /*
10341          * Legacy mode, no LAG configurations is supported.
10342          * This parameter is transferred to
10343          * mlx5dv_dr_action_create_dest_vport().
10344          */
10345         *dst_port_id = priv->vport_id;
10346 #endif
10347         return 0;
10348 }
10349
10350 /**
10351  * Create a counter with aging configuration.
10352  *
10353  * @param[in] dev
10354  *   Pointer to rte_eth_dev structure.
10355  * @param[in] dev_flow
10356  *   Pointer to the mlx5_flow.
10357  * @param[out] count
10358  *   Pointer to the counter action configuration.
10359  * @param[in] age
10360  *   Pointer to the aging action configuration.
10361  *
10362  * @return
10363  *   Index to flow counter on success, 0 otherwise.
10364  */
10365 static uint32_t
10366 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10367                                 struct mlx5_flow *dev_flow,
10368                                 const struct rte_flow_action_count *count,
10369                                 const struct rte_flow_action_age *age)
10370 {
10371         uint32_t counter;
10372         struct mlx5_age_param *age_param;
10373
10374         if (count && count->shared)
10375                 counter = flow_dv_counter_get_shared(dev, count->id);
10376         else
10377                 counter = flow_dv_counter_alloc(dev, !!age);
10378         if (!counter || age == NULL)
10379                 return counter;
10380         age_param = flow_dv_counter_idx_get_age(dev, counter);
10381         age_param->context = age->context ? age->context :
10382                 (void *)(uintptr_t)(dev_flow->flow_idx);
10383         age_param->timeout = age->timeout;
10384         age_param->port_id = dev->data->port_id;
10385         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10386         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10387         return counter;
10388 }
10389
10390 /**
10391  * Add Tx queue matcher
10392  *
10393  * @param[in] dev
10394  *   Pointer to the dev struct.
10395  * @param[in, out] matcher
10396  *   Flow matcher.
10397  * @param[in, out] key
10398  *   Flow matcher value.
10399  * @param[in] item
10400  *   Flow pattern to translate.
10401  * @param[in] inner
10402  *   Item is inner pattern.
10403  */
10404 static void
10405 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10406                                 void *matcher, void *key,
10407                                 const struct rte_flow_item *item)
10408 {
10409         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10410         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10411         void *misc_m =
10412                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10413         void *misc_v =
10414                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10415         struct mlx5_txq_ctrl *txq;
10416         uint32_t queue;
10417
10418
10419         queue_m = (const void *)item->mask;
10420         if (!queue_m)
10421                 return;
10422         queue_v = (const void *)item->spec;
10423         if (!queue_v)
10424                 return;
10425         txq = mlx5_txq_get(dev, queue_v->queue);
10426         if (!txq)
10427                 return;
10428         queue = txq->obj->sq->id;
10429         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10430         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10431                  queue & queue_m->queue);
10432         mlx5_txq_release(dev, queue_v->queue);
10433 }
10434
/**
 * Set the hash fields according to the @p flow information.
 *
 * Selects the Verbs RX hash field flags from the layers detected in the
 * flow handle and the requested RSS types. L3 (IPv4 xor IPv6) and L4
 * (UDP xor TCP) are chosen independently; within each, SRC_ONLY takes
 * precedence over DST_ONLY, and neither selects both directions.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] rss_desc
 *   Pointer to the mlx5_flow_rss_desc.
 */
static void
flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
		       struct mlx5_flow_rss_desc *rss_desc)
{
	uint64_t items = dev_flow->handle->layers;
	int rss_inner = 0;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);

	dev_flow->hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	/* RSS level >= 2 requests hashing on the inner headers. */
	if (rss_desc->level >= 2) {
		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
		rss_inner = 1;
	}
#endif
	/* L3: pick IPv4 or IPv6 fields from the relevant (inner/outer) layer. */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
			if (rss_types & ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
			else if (rss_types & ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
			else
				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
			if (rss_types & ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
			else if (rss_types & ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
			else
				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
		}
	}
	/* L4: pick UDP or TCP port fields the same way. */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
		if (rss_types & ETH_RSS_UDP) {
			if (rss_types & ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_SRC_PORT_UDP;
			else if (rss_types & ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_DST_PORT_UDP;
			else
				dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
		if (rss_types & ETH_RSS_TCP) {
			if (rss_types & ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_SRC_PORT_TCP;
			else if (rss_types & ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_DST_PORT_TCP;
			else
				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
		}
	}
}
10505
10506 /**
10507  * Prepare an Rx Hash queue.
10508  *
10509  * @param dev
10510  *   Pointer to Ethernet device.
10511  * @param[in] dev_flow
10512  *   Pointer to the mlx5_flow.
10513  * @param[in] rss_desc
10514  *   Pointer to the mlx5_flow_rss_desc.
10515  * @param[out] hrxq_idx
10516  *   Hash Rx queue index.
10517  *
10518  * @return
10519  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10520  */
10521 static struct mlx5_hrxq *
10522 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10523                      struct mlx5_flow *dev_flow,
10524                      struct mlx5_flow_rss_desc *rss_desc,
10525                      uint32_t *hrxq_idx)
10526 {
10527         struct mlx5_priv *priv = dev->data->dev_private;
10528         struct mlx5_flow_handle *dh = dev_flow->handle;
10529         struct mlx5_hrxq *hrxq;
10530
10531         MLX5_ASSERT(rss_desc->queue_num);
10532         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10533         rss_desc->hash_fields = dev_flow->hash_fields;
10534         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10535         rss_desc->shared_rss = 0;
10536         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10537         if (!*hrxq_idx)
10538                 return NULL;
10539         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10540                               *hrxq_idx);
10541         return hrxq;
10542 }
10543
/**
 * Release sample sub action resource.
 *
 * Drops, and zeroes, each sub-action index held by the sample action:
 * hash Rx queue, encap/decap, port-id action, tag and jump. Indices are
 * cleared after release so a repeated call is a no-op.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] act_res
 *   Pointer to sample sub action resource.
 */
static void
flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
				   struct mlx5_flow_sub_actions_idx *act_res)
{
	if (act_res->rix_hrxq) {
		mlx5_hrxq_release(dev, act_res->rix_hrxq);
		act_res->rix_hrxq = 0;
	}
	if (act_res->rix_encap_decap) {
		flow_dv_encap_decap_resource_release(dev,
						     act_res->rix_encap_decap);
		act_res->rix_encap_decap = 0;
	}
	if (act_res->rix_port_id_action) {
		flow_dv_port_id_action_resource_release(dev,
						act_res->rix_port_id_action);
		act_res->rix_port_id_action = 0;
	}
	if (act_res->rix_tag) {
		flow_dv_tag_release(dev, act_res->rix_tag);
		act_res->rix_tag = 0;
	}
	if (act_res->rix_jump) {
		flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
		act_res->rix_jump = 0;
	}
}
10579
10580 int
10581 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10582                         struct mlx5_cache_entry *entry, void *cb_ctx)
10583 {
10584         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10585         struct rte_eth_dev *dev = ctx->dev;
10586         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10587         struct mlx5_flow_dv_sample_resource *cache_resource =
10588                         container_of(entry, typeof(*cache_resource), entry);
10589
10590         if (resource->ratio == cache_resource->ratio &&
10591             resource->ft_type == cache_resource->ft_type &&
10592             resource->ft_id == cache_resource->ft_id &&
10593             resource->set_action == cache_resource->set_action &&
10594             !memcmp((void *)&resource->sample_act,
10595                     (void *)&cache_resource->sample_act,
10596                     sizeof(struct mlx5_flow_sub_actions_list))) {
10597                 /*
10598                  * Existing sample action should release the prepared
10599                  * sub-actions reference counter.
10600                  */
10601                 flow_dv_sample_sub_actions_release(dev,
10602                                                 &resource->sample_idx);
10603                 return 0;
10604         }
10605         return 1;
10606 }
10607
/**
 * Cache list create callback for sample resources.
 *
 * Allocates a sample resource, creates the "normal path" table one level
 * below the sample table, appends the FDB default-miss action when in
 * transfer mode, and creates the DR sampler action. On any failure the
 * partially built state is unwound and NULL is returned with @p error
 * filled.
 *
 * @return
 *   Pointer to the new cache entry, NULL on failure.
 */
struct mlx5_cache_entry *
flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
	void **sample_dv_actions = resource->sub_actions;
	struct mlx5_flow_dv_sample_resource *cache_resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	/* The normal path table sits one level below the sample table. */
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = resource->ft_id + next_ft_step;
	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new sample resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*cache_resource = *resource;
	/* Create normal path table level */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					is_egress, is_transfer,
					true, NULL, 0, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	cache_resource->normal_path_tbl = tbl;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		/* Transfer mode needs the shared default-miss action. */
		if (!sh->default_miss_action) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"default miss action was not "
						"created");
			goto error;
		}
		sample_dv_actions[resource->sample_act.actions_num++] =
						sh->default_miss_action;
	}
	/* Create a DR sample action */
	sampler_attr.sample_ratio = cache_resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = cache_resource->set_action;
	if (mlx5_os_flow_dr_create_flow_action_sampler
			(&sampler_attr, &cache_resource->verbs_action)) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	cache_resource->idx = idx;
	cache_resource->dev = dev;
	return &cache_resource->entry;
error:
	/*
	 * Unwind: the FDB case keeps the caller's sub-action references,
	 * otherwise they were consumed here and must be released; then
	 * drop the normal path table (if created) and free the entry.
	 */
	if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
		flow_dv_sample_sub_actions_release(dev,
						   &cache_resource->sample_idx);
	if (cache_resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
				cache_resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
	return NULL;

}
10696
10697 /**
10698  * Find existing sample resource or create and register a new one.
10699  *
10700  * @param[in, out] dev
10701  *   Pointer to rte_eth_dev structure.
10702  * @param[in] resource
10703  *   Pointer to sample resource.
10704  * @parm[in, out] dev_flow
10705  *   Pointer to the dev_flow.
10706  * @param[out] error
10707  *   pointer to error structure.
10708  *
10709  * @return
10710  *   0 on success otherwise -errno and errno is set.
10711  */
10712 static int
10713 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10714                          struct mlx5_flow_dv_sample_resource *resource,
10715                          struct mlx5_flow *dev_flow,
10716                          struct rte_flow_error *error)
10717 {
10718         struct mlx5_flow_dv_sample_resource *cache_resource;
10719         struct mlx5_cache_entry *entry;
10720         struct mlx5_priv *priv = dev->data->dev_private;
10721         struct mlx5_flow_cb_ctx ctx = {
10722                 .dev = dev,
10723                 .error = error,
10724                 .data = resource,
10725         };
10726
10727         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10728         if (!entry)
10729                 return -rte_errno;
10730         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10731         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10732         dev_flow->dv.sample_res = cache_resource;
10733         return 0;
10734 }
10735
10736 int
10737 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10738                             struct mlx5_cache_entry *entry, void *cb_ctx)
10739 {
10740         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10741         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10742         struct rte_eth_dev *dev = ctx->dev;
10743         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10744                         container_of(entry, typeof(*cache_resource), entry);
10745         uint32_t idx = 0;
10746
10747         if (resource->num_of_dest == cache_resource->num_of_dest &&
10748             resource->ft_type == cache_resource->ft_type &&
10749             !memcmp((void *)cache_resource->sample_act,
10750                     (void *)resource->sample_act,
10751                    (resource->num_of_dest *
10752                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10753                 /*
10754                  * Existing sample action should release the prepared
10755                  * sub-actions reference counter.
10756                  */
10757                 for (idx = 0; idx < resource->num_of_dest; idx++)
10758                         flow_dv_sample_sub_actions_release(dev,
10759                                         &resource->sample_idx[idx]);
10760                 return 0;
10761         }
10762         return 1;
10763 }
10764
/**
 * Creation callback for a destination array resource, invoked by the cache
 * list when no matching entry exists.
 *
 * Builds one mlx5dv destination attribute per sub-action and creates the
 * DR destination array action from them.
 *
 * @param[in] list
 *   Owning cache list (unused).
 * @param[in] entry
 *   Unused - a new entry is allocated here.
 * @param[in] cb_ctx
 *   Pointer to a struct mlx5_flow_cb_ctx carrying dev, error and the
 *   template resource in @c data.
 *
 * @return
 *   Pointer to the new cache entry on success, NULL otherwise (error
 *   details set through @c ctx->error).
 */
struct mlx5_cache_entry *
flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_dest_array_resource *cache_resource;
	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx = 0, res_idx = 0;
	struct rte_flow_error *error = ctx->error;
	uint64_t action_flags;
	int ret;

	/* Register new destination array resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
					    &res_idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	/* Shallow copy; the new entry takes over the sub-action references. */
	*cache_resource = *resource;
	/* Select the DR domain matching the flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
				 mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(struct mlx5dv_dr_action_dest_attr),
				 0, SOCKET_ID_ANY);
		if (!dest_attr[idx]) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot allocate resource memory");
			goto error;
		}
		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
		sample_act = &resource->sample_act[idx];
		action_flags = sample_act->action_flags;
		/* Translate each sub-action fate into a DR destination. */
		switch (action_flags) {
		case MLX5_FLOW_ACTION_QUEUE:
			dest_attr[idx]->dest = sample_act->dr_queue_action;
			break;
		case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
			/* Packet reformat (encap) chained with a port fate. */
			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
			dest_attr[idx]->dest_reformat->reformat =
					sample_act->dr_encap_action;
			dest_attr[idx]->dest_reformat->dest =
					sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_PORT_ID:
			dest_attr[idx]->dest = sample_act->dr_port_id_action;
			break;
		case MLX5_FLOW_ACTION_JUMP:
			dest_attr[idx]->dest = sample_act->dr_jump_action;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL,
					   "unsupported actions type");
			goto error;
		}
	}
	/* create a dest array action */
	ret = mlx5_os_flow_dr_create_flow_action_dest_array
						(domain,
						 cache_resource->num_of_dest,
						 dest_attr,
						 &cache_resource->action);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create destination array action");
		goto error;
	}
	cache_resource->idx = res_idx;
	cache_resource->dev = dev;
	/* The attribute temporaries are no longer needed once the
	 * action is created.
	 */
	for (idx = 0; idx < resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return &cache_resource->entry;
error:
	/* Release the sub-action references copied into the new entry. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		flow_dv_sample_sub_actions_release(dev,
				&cache_resource->sample_idx[idx]);
		if (dest_attr[idx])
			mlx5_free(dest_attr[idx]);
	}

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
	return NULL;
}
10872
10873 /**
10874  * Find existing destination array resource or create and register a new one.
10875  *
10876  * @param[in, out] dev
10877  *   Pointer to rte_eth_dev structure.
10878  * @param[in] resource
10879  *   Pointer to destination array resource.
10880  * @parm[in, out] dev_flow
10881  *   Pointer to the dev_flow.
10882  * @param[out] error
10883  *   pointer to error structure.
10884  *
10885  * @return
10886  *   0 on success otherwise -errno and errno is set.
10887  */
10888 static int
10889 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10890                          struct mlx5_flow_dv_dest_array_resource *resource,
10891                          struct mlx5_flow *dev_flow,
10892                          struct rte_flow_error *error)
10893 {
10894         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10895         struct mlx5_priv *priv = dev->data->dev_private;
10896         struct mlx5_cache_entry *entry;
10897         struct mlx5_flow_cb_ctx ctx = {
10898                 .dev = dev,
10899                 .error = error,
10900                 .data = resource,
10901         };
10902
10903         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10904         if (!entry)
10905                 return -rte_errno;
10906         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10907         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10908         dev_flow->dv.dest_array_res = cache_resource;
10909         return 0;
10910 }
10911
/**
 * Convert Sample action to DV specification.
 *
 * Walks the sample sub-action list, prepares each sub-action's DR action
 * and collects them into @p sample_actions / @p res. Resources that the
 * flow handle normally owns (tag, port-id, encap) are saved before the
 * sub-action registration and restored afterwards, so the sample path
 * keeps its own references without disturbing the normal path.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to sample action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in, out] num_of_dest
 *   Pointer to the num of destination.
 * @param[in, out] sample_actions
 *   Pointer to sample actions list.
 * @param[in, out] res
 *   Pointer to sample resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
				const struct rte_flow_action_sample *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				uint32_t *num_of_dest,
				void **sample_actions,
				struct mlx5_flow_dv_sample_resource *res,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action *sub_actions;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5_flow_sub_actions_idx *sample_idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5_flow_rss_desc *rss_desc;
	uint64_t action_flags = 0;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	sample_act = &res->sample_act;
	sample_idx = &res->sample_idx;
	res->ratio = action->ratio;
	sub_actions = action->actions;
	for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
		int type = sub_actions->type;
		uint32_t pre_rix = 0;
		void *pre_r;
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		{
			const struct rte_flow_action_queue *queue;
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			/* Single-queue fate: build an hrxq for it. */
			queue = sub_actions->conf;
			rss_desc->queue_num = 1;
			rss_desc->queue[0] = queue->index;
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS:
		{
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;
			const struct rte_flow_action_rss *rss;
			const uint8_t *rss_key;

			rss = sub_actions->conf;
			memcpy(rss_desc->queue, rss->queue,
			       rss->queue_num * sizeof(uint16_t));
			rss_desc->queue_num = rss->queue_num;
			/* NULL RSS key indicates default RSS key. */
			rss_key = !rss->key ? rss_hash_default_key : rss->key;
			memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
			/*
			 * rss->level and rss.types should be set in advance
			 * when expanding items for RSS.
			 */
			flow_dv_hashfields_set(dev_flow, rss_desc);
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK:
		{
			uint32_t tag_be = mlx5_flow_mark_set
				(((const struct rte_flow_action_mark *)
				(sub_actions->conf))->id);

			dev_flow->handle->mark = 1;
			pre_rix = dev_flow->handle->dvh.rix_tag;
			/* Save the mark resource before sample */
			pre_r = dev_flow->dv.tag_resource;
			if (flow_dv_tag_resource_register(dev, tag_be,
						  dev_flow, error))
				return -rte_errno;
			MLX5_ASSERT(dev_flow->dv.tag_resource);
			sample_act->dr_tag_action =
				dev_flow->dv.tag_resource->action;
			sample_idx->rix_tag =
				dev_flow->handle->dvh.rix_tag;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_tag_action;
			/* Recover the mark resource after sample */
			dev_flow->dv.tag_resource = pre_r;
			dev_flow->handle->dvh.rix_tag = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT:
		{
			/* The counter is shared with the normal path. */
			if (!flow->counter) {
				flow->counter =
					flow_dv_translate_create_counter(dev,
						dev_flow, sub_actions->conf,
						0);
				if (!flow->counter)
					return rte_flow_error_set
						(error, rte_errno,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"cannot create counter"
						" object.");
			}
			sample_act->dr_cnt_action =
				  (flow_dv_counter_get_by_idx(dev,
				  flow->counter, NULL))->action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_cnt_action;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
		{
			struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
			uint32_t port_id = 0;

			memset(&port_id_resource, 0, sizeof(port_id_resource));
			/* Save the port id resource before sample */
			pre_rix = dev_flow->handle->rix_port_id_action;
			pre_r = dev_flow->dv.port_id_action;
			if (flow_dv_translate_action_port_id(dev, sub_actions,
							     &port_id, error))
				return -rte_errno;
			port_id_resource.port_id = port_id;
			if (flow_dv_port_id_action_resource_register
			    (dev, &port_id_resource, dev_flow, error))
				return -rte_errno;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			sample_idx->rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_port_id_action;
			/* Recover the port id resource after sample */
			dev_flow->dv.port_id_action = pre_r;
			dev_flow->handle->rix_port_id_action = pre_rix;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* Save the encap resource before sample */
			pre_rix = dev_flow->handle->dvh.rix_encap_decap;
			pre_r = dev_flow->dv.encap_decap;
			if (flow_dv_create_action_l2_encap(dev, sub_actions,
							   dev_flow,
							   attr->transfer,
							   error))
				return -rte_errno;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			sample_idx->rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_encap_action;
			/* Recover the encap resource after sample */
			dev_flow->dv.encap_decap = pre_r;
			dev_flow->handle->dvh.rix_encap_decap = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"Not support for sampler action");
		}
	}
	sample_act->action_flags = action_flags;
	res->ft_id = dev_flow->dv.group;
	if (attr->transfer) {
		union {
			uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
			uint64_t set_action;
		} action_ctx = { .set_action = 0 };

		/*
		 * E-Switch (FDB): restore the source vport metadata in
		 * register C0 for sampled packets.
		 */
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		MLX5_SET(set_action_in, action_ctx.action_in, action_type,
			 MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, action_ctx.action_in, field,
			 MLX5_MODI_META_REG_C_0);
		MLX5_SET(set_action_in, action_ctx.action_in, data,
			 priv->vport_meta_tag);
		res->set_action = action_ctx.set_action;
	} else if (attr->ingress) {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	} else {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
	}
	return 0;
}
11164
/**
 * Create the sample action resource, or - when extra mirror destinations
 * exist - the destination array resource that wraps it.
 *
 * With more than one destination, the normal-path fate resources are moved
 * from the flow handle into the last slot of the destination array, and
 * the sample sub-actions occupy the first slot.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] num_of_dest
 *   The num of destination.
 * @param[in, out] res
 *   Pointer to sample resource.
 * @param[in, out] mdest_res
 *   Pointer to destination array resource.
 * @param[in] sample_actions
 *   Pointer to sample path actions list.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
			     struct mlx5_flow *dev_flow,
			     uint32_t num_of_dest,
			     struct mlx5_flow_dv_sample_resource *res,
			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
			     void **sample_actions,
			     uint64_t action_flags,
			     struct rte_flow_error *error)
{
	/* update normal path action resource into last index of array */
	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
	struct mlx5_flow_sub_actions_list *sample_act =
					&mdest_res->sample_act[dest_index];
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	uint32_t normal_idx = 0;
	struct mlx5_hrxq *hrxq;
	uint32_t hrxq_idx;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (num_of_dest > 1) {
		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
			/* Handle QP action for mirroring */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
				     (error, rte_errno,
				      RTE_FLOW_ERROR_TYPE_ACTION,
				      NULL,
				      "cannot create rx queue");
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
			sample_act->dr_queue_action = hrxq->action;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
		}
		/*
		 * For encap/port-id/jump fates, transfer ownership of the
		 * handle's resource index to the destination array entry
		 * (the handle's index is zeroed so it is not released twice).
		 */
		if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			dev_flow->handle->dvh.rix_encap_decap = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			dev_flow->handle->rix_port_id_action = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_jump =
				dev_flow->handle->rix_jump;
			sample_act->dr_jump_action =
				dev_flow->dv.jump->action;
			dev_flow->handle->rix_jump = 0;
		}
		sample_act->actions_num = normal_idx;
		/* update sample action resource into first index of array */
		mdest_res->ft_type = res->ft_type;
		memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
				sizeof(struct mlx5_flow_sub_actions_idx));
		memcpy(&mdest_res->sample_act[0], &res->sample_act,
				sizeof(struct mlx5_flow_sub_actions_list));
		mdest_res->num_of_dest = num_of_dest;
		if (flow_dv_dest_array_resource_register(dev, mdest_res,
							 dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't create sample "
						  "action");
	} else {
		/* Single destination: a plain sample action is enough. */
		res->sub_actions = sample_actions;
		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "can't create sample action");
	}
	return 0;
}
11276
11277 /**
11278  * Remove an ASO age action from age actions list.
11279  *
11280  * @param[in] dev
11281  *   Pointer to the Ethernet device structure.
11282  * @param[in] age
11283  *   Pointer to the aso age action handler.
11284  */
11285 static void
11286 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11287                                 struct mlx5_aso_age_action *age)
11288 {
11289         struct mlx5_age_info *age_info;
11290         struct mlx5_age_param *age_param = &age->age_params;
11291         struct mlx5_priv *priv = dev->data->dev_private;
11292         uint16_t expected = AGE_CANDIDATE;
11293
11294         age_info = GET_PORT_AGE_INFO(priv);
11295         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11296                                          AGE_FREE, false, __ATOMIC_RELAXED,
11297                                          __ATOMIC_RELAXED)) {
11298                 /**
11299                  * We need the lock even it is age timeout,
11300                  * since age action may still in process.
11301                  */
11302                 rte_spinlock_lock(&age_info->aged_sl);
11303                 LIST_REMOVE(age, next);
11304                 rte_spinlock_unlock(&age_info->aged_sl);
11305                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11306         }
11307 }
11308
11309 /**
11310  * Release an ASO age action.
11311  *
11312  * @param[in] dev
11313  *   Pointer to the Ethernet device structure.
11314  * @param[in] age_idx
11315  *   Index of ASO age action to release.
11316  * @param[in] flow
11317  *   True if the release operation is during flow destroy operation.
11318  *   False if the release operation is during action destroy operation.
11319  *
11320  * @return
11321  *   0 when age action was removed, otherwise the number of references.
11322  */
11323 static int
11324 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11325 {
11326         struct mlx5_priv *priv = dev->data->dev_private;
11327         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11328         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11329         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11330
11331         if (!ret) {
11332                 flow_dv_aso_age_remove_from_age(dev, age);
11333                 rte_spinlock_lock(&mng->free_sl);
11334                 LIST_INSERT_HEAD(&mng->free, age, next);
11335                 rte_spinlock_unlock(&mng->free_sl);
11336         }
11337         return ret;
11338 }
11339
11340 /**
11341  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11342  *
11343  * @param[in] dev
11344  *   Pointer to the Ethernet device structure.
11345  *
11346  * @return
11347  *   0 on success, otherwise negative errno value and rte_errno is set.
11348  */
11349 static int
11350 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11351 {
11352         struct mlx5_priv *priv = dev->data->dev_private;
11353         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11354         void *old_pools = mng->pools;
11355         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11356         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11357         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11358
11359         if (!pools) {
11360                 rte_errno = ENOMEM;
11361                 return -ENOMEM;
11362         }
11363         if (old_pools) {
11364                 memcpy(pools, old_pools,
11365                        mng->n * sizeof(struct mlx5_flow_counter_pool *));
11366                 mlx5_free(old_pools);
11367         } else {
11368                 /* First ASO flow hit allocation - starting ASO data-path. */
11369                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11370
11371                 if (ret) {
11372                         mlx5_free(pools);
11373                         return ret;
11374                 }
11375         }
11376         mng->n = resize;
11377         mng->pools = pools;
11378         return 0;
11379 }
11380
11381 /**
11382  * Create and initialize a new ASO aging pool.
11383  *
11384  * @param[in] dev
11385  *   Pointer to the Ethernet device structure.
11386  * @param[out] age_free
11387  *   Where to put the pointer of a new age action.
11388  *
11389  * @return
11390  *   The age actions pool pointer and @p age_free is set on success,
11391  *   NULL otherwise and rte_errno is set.
11392  */
11393 static struct mlx5_aso_age_pool *
11394 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11395                         struct mlx5_aso_age_action **age_free)
11396 {
11397         struct mlx5_priv *priv = dev->data->dev_private;
11398         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11399         struct mlx5_aso_age_pool *pool = NULL;
11400         struct mlx5_devx_obj *obj = NULL;
11401         uint32_t i;
11402
11403         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11404                                                     priv->sh->pdn);
11405         if (!obj) {
11406                 rte_errno = ENODATA;
11407                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11408                 return NULL;
11409         }
11410         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11411         if (!pool) {
11412                 claim_zero(mlx5_devx_cmd_destroy(obj));
11413                 rte_errno = ENOMEM;
11414                 return NULL;
11415         }
11416         pool->flow_hit_aso_obj = obj;
11417         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11418         rte_spinlock_lock(&mng->resize_sl);
11419         pool->index = mng->next;
11420         /* Resize pools array if there is no room for the new pool in it. */
11421         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11422                 claim_zero(mlx5_devx_cmd_destroy(obj));
11423                 mlx5_free(pool);
11424                 rte_spinlock_unlock(&mng->resize_sl);
11425                 return NULL;
11426         }
11427         mng->pools[pool->index] = pool;
11428         mng->next++;
11429         rte_spinlock_unlock(&mng->resize_sl);
11430         /* Assign the first action in the new pool, the rest go to free list. */
11431         *age_free = &pool->actions[0];
11432         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11433                 pool->actions[i].offset = i;
11434                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11435         }
11436         return pool;
11437 }
11438
11439 /**
11440  * Allocate a ASO aging bit.
11441  *
11442  * @param[in] dev
11443  *   Pointer to the Ethernet device structure.
11444  * @param[out] error
11445  *   Pointer to the error structure.
11446  *
11447  * @return
11448  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11449  */
11450 static uint32_t
11451 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11452 {
11453         struct mlx5_priv *priv = dev->data->dev_private;
11454         const struct mlx5_aso_age_pool *pool;
11455         struct mlx5_aso_age_action *age_free = NULL;
11456         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11457
11458         MLX5_ASSERT(mng);
11459         /* Try to get the next free age action bit. */
11460         rte_spinlock_lock(&mng->free_sl);
11461         age_free = LIST_FIRST(&mng->free);
11462         if (age_free) {
11463                 LIST_REMOVE(age_free, next);
11464         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11465                 rte_spinlock_unlock(&mng->free_sl);
11466                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11467                                    NULL, "failed to create ASO age pool");
11468                 return 0; /* 0 is an error. */
11469         }
11470         rte_spinlock_unlock(&mng->free_sl);
11471         pool = container_of
11472           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11473                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11474                                                                        actions);
11475         if (!age_free->dr_action) {
11476                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11477                                                  error);
11478
11479                 if (reg_c < 0) {
11480                         rte_flow_error_set(error, rte_errno,
11481                                            RTE_FLOW_ERROR_TYPE_ACTION,
11482                                            NULL, "failed to get reg_c "
11483                                            "for ASO flow hit");
11484                         return 0; /* 0 is an error. */
11485                 }
11486 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11487                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11488                                 (priv->sh->rx_domain,
11489                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11490                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11491                                  (reg_c - REG_C_0));
11492 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11493                 if (!age_free->dr_action) {
11494                         rte_errno = errno;
11495                         rte_spinlock_lock(&mng->free_sl);
11496                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11497                         rte_spinlock_unlock(&mng->free_sl);
11498                         rte_flow_error_set(error, rte_errno,
11499                                            RTE_FLOW_ERROR_TYPE_ACTION,
11500                                            NULL, "failed to create ASO "
11501                                            "flow hit action");
11502                         return 0; /* 0 is an error. */
11503                 }
11504         }
11505         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11506         return pool->index | ((age_free->offset + 1) << 16);
11507 }
11508
11509 /**
11510  * Initialize flow ASO age parameters.
11511  *
11512  * @param[in] dev
11513  *   Pointer to rte_eth_dev structure.
11514  * @param[in] age_idx
11515  *   Index of ASO age action.
11516  * @param[in] context
11517  *   Pointer to flow counter age context.
11518  * @param[in] timeout
11519  *   Aging timeout in seconds.
11520  *
11521  */
11522 static void
11523 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11524                             uint32_t age_idx,
11525                             void *context,
11526                             uint32_t timeout)
11527 {
11528         struct mlx5_aso_age_action *aso_age;
11529
11530         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11531         MLX5_ASSERT(aso_age);
11532         aso_age->age_params.context = context;
11533         aso_age->age_params.timeout = timeout;
11534         aso_age->age_params.port_id = dev->data->port_id;
11535         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11536                          __ATOMIC_RELAXED);
11537         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11538                          __ATOMIC_RELAXED);
11539 }
11540
11541 static void
11542 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11543                                const struct rte_flow_item_integrity *value,
11544                                void *headers_m, void *headers_v)
11545 {
11546         if (mask->l4_ok) {
11547                 /* application l4_ok filter aggregates all hardware l4 filters
11548                  * therefore hw l4_checksum_ok must be implicitly added here.
11549                  */
11550                 struct rte_flow_item_integrity local_item;
11551
11552                 local_item.l4_csum_ok = 1;
11553                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11554                          local_item.l4_csum_ok);
11555                 if (value->l4_ok) {
11556                         /* application l4_ok = 1 matches sets both hw flags
11557                          * l4_ok and l4_checksum_ok flags to 1.
11558                          */
11559                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11560                                  l4_checksum_ok, local_item.l4_csum_ok);
11561                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11562                                  mask->l4_ok);
11563                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11564                                  value->l4_ok);
11565                 } else {
11566                         /* application l4_ok = 0 matches on hw flag
11567                          * l4_checksum_ok = 0 only.
11568                          */
11569                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11570                                  l4_checksum_ok, 0);
11571                 }
11572         } else if (mask->l4_csum_ok) {
11573                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11574                          mask->l4_csum_ok);
11575                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11576                          value->l4_csum_ok);
11577         }
11578 }
11579
11580 static void
11581 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
11582                                const struct rte_flow_item_integrity *value,
11583                                void *headers_m, void *headers_v,
11584                                bool is_ipv4)
11585 {
11586         if (mask->l3_ok) {
11587                 /* application l3_ok filter aggregates all hardware l3 filters
11588                  * therefore hw ipv4_checksum_ok must be implicitly added here.
11589                  */
11590                 struct rte_flow_item_integrity local_item;
11591
11592                 local_item.ipv4_csum_ok = !!is_ipv4;
11593                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11594                          local_item.ipv4_csum_ok);
11595                 if (value->l3_ok) {
11596                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11597                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
11598                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
11599                                  mask->l3_ok);
11600                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
11601                                  value->l3_ok);
11602                 } else {
11603                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11604                                  ipv4_checksum_ok, 0);
11605                 }
11606         } else if (mask->ipv4_csum_ok) {
11607                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11608                          mask->ipv4_csum_ok);
11609                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11610                          value->ipv4_csum_ok);
11611         }
11612 }
11613
11614 static void
11615 flow_dv_translate_item_integrity(void *matcher, void *key,
11616                                  const struct rte_flow_item *head_item,
11617                                  const struct rte_flow_item *integrity_item)
11618 {
11619         const struct rte_flow_item_integrity *mask = integrity_item->mask;
11620         const struct rte_flow_item_integrity *value = integrity_item->spec;
11621         const struct rte_flow_item *tunnel_item, *end_item, *item;
11622         void *headers_m;
11623         void *headers_v;
11624         uint32_t l3_protocol;
11625
11626         if (!value)
11627                 return;
11628         if (!mask)
11629                 mask = &rte_flow_item_integrity_mask;
11630         if (value->level > 1) {
11631                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11632                                          inner_headers);
11633                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
11634         } else {
11635                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11636                                          outer_headers);
11637                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
11638         }
11639         tunnel_item = mlx5_flow_find_tunnel_item(head_item);
11640         if (value->level > 1) {
11641                 /* tunnel item was verified during the item validation */
11642                 item = tunnel_item;
11643                 end_item = mlx5_find_end_item(tunnel_item);
11644         } else {
11645                 item = head_item;
11646                 end_item = tunnel_item ? tunnel_item :
11647                            mlx5_find_end_item(integrity_item);
11648         }
11649         l3_protocol = mask->l3_ok ?
11650                       mlx5_flow_locate_proto_l3(&item, end_item) : 0;
11651         flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
11652                                        l3_protocol == RTE_ETHER_TYPE_IPV4);
11653         flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
11654 }
11655
11656 /**
11657  * Prepares DV flow counter with aging configuration.
11658  * Gets it by index when exists, creates a new one when doesn't.
11659  *
11660  * @param[in] dev
11661  *   Pointer to rte_eth_dev structure.
11662  * @param[in] dev_flow
11663  *   Pointer to the mlx5_flow.
11664  * @param[in, out] flow
11665  *   Pointer to the sub flow.
11666  * @param[in] count
11667  *   Pointer to the counter action configuration.
11668  * @param[in] age
11669  *   Pointer to the aging action configuration.
11670  * @param[out] error
11671  *   Pointer to the error structure.
11672  *
11673  * @return
11674  *   Pointer to the counter, NULL otherwise.
11675  */
11676 static struct mlx5_flow_counter *
11677 flow_dv_prepare_counter(struct rte_eth_dev *dev,
11678                         struct mlx5_flow *dev_flow,
11679                         struct rte_flow *flow,
11680                         const struct rte_flow_action_count *count,
11681                         const struct rte_flow_action_age *age,
11682                         struct rte_flow_error *error)
11683 {
11684         if (!flow->counter) {
11685                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
11686                                                                  count, age);
11687                 if (!flow->counter) {
11688                         rte_flow_error_set(error, rte_errno,
11689                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11690                                            "cannot create counter object.");
11691                         return NULL;
11692                 }
11693         }
11694         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11695 }
11696
11697 /*
11698  * Release an ASO CT action by its own device.
11699  *
11700  * @param[in] dev
11701  *   Pointer to the Ethernet device structure.
11702  * @param[in] idx
11703  *   Index of ASO CT action to release.
11704  *
11705  * @return
11706  *   0 when CT action was removed, otherwise the number of references.
11707  */
11708 static inline int
11709 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
11710 {
11711         struct mlx5_priv *priv = dev->data->dev_private;
11712         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11713         uint32_t ret;
11714         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
11715         enum mlx5_aso_ct_state state =
11716                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
11717
11718         /* Cannot release when CT is in the ASO SQ. */
11719         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
11720                 return -1;
11721         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
11722         if (!ret) {
11723                 if (ct->dr_action_orig) {
11724 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11725                         claim_zero(mlx5_glue->destroy_flow_action
11726                                         (ct->dr_action_orig));
11727 #endif
11728                         ct->dr_action_orig = NULL;
11729                 }
11730                 if (ct->dr_action_rply) {
11731 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11732                         claim_zero(mlx5_glue->destroy_flow_action
11733                                         (ct->dr_action_rply));
11734 #endif
11735                         ct->dr_action_rply = NULL;
11736                 }
11737                 /* Clear the state to free, no need in 1st allocation. */
11738                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
11739                 rte_spinlock_lock(&mng->ct_sl);
11740                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
11741                 rte_spinlock_unlock(&mng->ct_sl);
11742         }
11743         return (int)ret;
11744 }
11745
11746 static inline int
11747 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
11748 {
11749         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
11750         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
11751         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
11752         RTE_SET_USED(dev);
11753
11754         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
11755         if (dev->data->dev_started != 1)
11756                 return -1;
11757         return flow_dv_aso_ct_dev_release(owndev, idx);
11758 }
11759
11760 /*
11761  * Resize the ASO CT pools array by 64 pools.
11762  *
11763  * @param[in] dev
11764  *   Pointer to the Ethernet device structure.
11765  *
11766  * @return
11767  *   0 on success, otherwise negative errno value and rte_errno is set.
11768  */
11769 static int
11770 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
11771 {
11772         struct mlx5_priv *priv = dev->data->dev_private;
11773         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11774         void *old_pools = mng->pools;
11775         /* Magic number now, need a macro. */
11776         uint32_t resize = mng->n + 64;
11777         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
11778         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11779
11780         if (!pools) {
11781                 rte_errno = ENOMEM;
11782                 return -rte_errno;
11783         }
11784         rte_rwlock_write_lock(&mng->resize_rwl);
11785         /* ASO SQ/QP was already initialized in the startup. */
11786         if (old_pools) {
11787                 /* Realloc could be an alternative choice. */
11788                 rte_memcpy(pools, old_pools,
11789                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
11790                 mlx5_free(old_pools);
11791         }
11792         mng->n = resize;
11793         mng->pools = pools;
11794         rte_rwlock_write_unlock(&mng->resize_rwl);
11795         return 0;
11796 }
11797
11798 /*
11799  * Create and initialize a new ASO CT pool.
11800  *
11801  * @param[in] dev
11802  *   Pointer to the Ethernet device structure.
11803  * @param[out] ct_free
11804  *   Where to put the pointer of a new CT action.
11805  *
11806  * @return
11807  *   The CT actions pool pointer and @p ct_free is set on success,
11808  *   NULL otherwise and rte_errno is set.
11809  */
11810 static struct mlx5_aso_ct_pool *
11811 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
11812                        struct mlx5_aso_ct_action **ct_free)
11813 {
11814         struct mlx5_priv *priv = dev->data->dev_private;
11815         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11816         struct mlx5_aso_ct_pool *pool = NULL;
11817         struct mlx5_devx_obj *obj = NULL;
11818         uint32_t i;
11819         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
11820
11821         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
11822                                                 priv->sh->pdn, log_obj_size);
11823         if (!obj) {
11824                 rte_errno = ENODATA;
11825                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
11826                 return NULL;
11827         }
11828         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11829         if (!pool) {
11830                 rte_errno = ENOMEM;
11831                 claim_zero(mlx5_devx_cmd_destroy(obj));
11832                 return NULL;
11833         }
11834         pool->devx_obj = obj;
11835         pool->index = mng->next;
11836         /* Resize pools array if there is no room for the new pool in it. */
11837         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
11838                 claim_zero(mlx5_devx_cmd_destroy(obj));
11839                 mlx5_free(pool);
11840                 return NULL;
11841         }
11842         mng->pools[pool->index] = pool;
11843         mng->next++;
11844         /* Assign the first action in the new pool, the rest go to free list. */
11845         *ct_free = &pool->actions[0];
11846         /* Lock outside, the list operation is safe here. */
11847         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
11848                 /* refcnt is 0 when allocating the memory. */
11849                 pool->actions[i].offset = i;
11850                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
11851         }
11852         return pool;
11853 }
11854
11855 /*
11856  * Allocate a ASO CT action from free list.
11857  *
11858  * @param[in] dev
11859  *   Pointer to the Ethernet device structure.
11860  * @param[out] error
11861  *   Pointer to the error structure.
11862  *
11863  * @return
11864  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
11865  */
11866 static uint32_t
11867 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11868 {
11869         struct mlx5_priv *priv = dev->data->dev_private;
11870         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11871         struct mlx5_aso_ct_action *ct = NULL;
11872         struct mlx5_aso_ct_pool *pool;
11873         uint8_t reg_c;
11874         uint32_t ct_idx;
11875
11876         MLX5_ASSERT(mng);
11877         if (!priv->config.devx) {
11878                 rte_errno = ENOTSUP;
11879                 return 0;
11880         }
11881         /* Get a free CT action, if no, a new pool will be created. */
11882         rte_spinlock_lock(&mng->ct_sl);
11883         ct = LIST_FIRST(&mng->free_cts);
11884         if (ct) {
11885                 LIST_REMOVE(ct, next);
11886         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
11887                 rte_spinlock_unlock(&mng->ct_sl);
11888                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11889                                    NULL, "failed to create ASO CT pool");
11890                 return 0;
11891         }
11892         rte_spinlock_unlock(&mng->ct_sl);
11893         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
11894         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
11895         /* 0: inactive, 1: created, 2+: used by flows. */
11896         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
11897         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
11898         if (!ct->dr_action_orig) {
11899 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11900                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
11901                         (priv->sh->rx_domain, pool->devx_obj->obj,
11902                          ct->offset,
11903                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
11904                          reg_c - REG_C_0);
11905 #else
11906                 RTE_SET_USED(reg_c);
11907 #endif
11908                 if (!ct->dr_action_orig) {
11909                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11910                         rte_flow_error_set(error, rte_errno,
11911                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11912                                            "failed to create ASO CT action");
11913                         return 0;
11914                 }
11915         }
11916         if (!ct->dr_action_rply) {
11917 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11918                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
11919                         (priv->sh->rx_domain, pool->devx_obj->obj,
11920                          ct->offset,
11921                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
11922                          reg_c - REG_C_0);
11923 #endif
11924                 if (!ct->dr_action_rply) {
11925                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11926                         rte_flow_error_set(error, rte_errno,
11927                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11928                                            "failed to create ASO CT action");
11929                         return 0;
11930                 }
11931         }
11932         return ct_idx;
11933 }
11934
11935 /*
11936  * Create a conntrack object with context and actions by using ASO mechanism.
11937  *
11938  * @param[in] dev
11939  *   Pointer to rte_eth_dev structure.
11940  * @param[in] pro
11941  *   Pointer to conntrack information profile.
11942  * @param[out] error
11943  *   Pointer to the error structure.
11944  *
11945  * @return
11946  *   Index to conntrack object on success, 0 otherwise.
11947  */
11948 static uint32_t
11949 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
11950                                    const struct rte_flow_action_conntrack *pro,
11951                                    struct rte_flow_error *error)
11952 {
11953         struct mlx5_priv *priv = dev->data->dev_private;
11954         struct mlx5_dev_ctx_shared *sh = priv->sh;
11955         struct mlx5_aso_ct_action *ct;
11956         uint32_t idx;
11957
11958         if (!sh->ct_aso_en)
11959                 return rte_flow_error_set(error, ENOTSUP,
11960                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11961                                           "Connection is not supported");
11962         idx = flow_dv_aso_ct_alloc(dev, error);
11963         if (!idx)
11964                 return rte_flow_error_set(error, rte_errno,
11965                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11966                                           "Failed to allocate CT object");
11967         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
11968         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
11969                 return rte_flow_error_set(error, EBUSY,
11970                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11971                                           "Failed to update CT");
11972         ct->is_original = !!pro->is_original_dir;
11973         ct->peer = pro->peer_port;
11974         return idx;
11975 }
11976
11977 /**
11978  * Fill the flow with DV spec, lock free
11979  * (mutex should be acquired by caller).
11980  *
11981  * @param[in] dev
11982  *   Pointer to rte_eth_dev structure.
11983  * @param[in, out] dev_flow
11984  *   Pointer to the sub flow.
11985  * @param[in] attr
11986  *   Pointer to the flow attributes.
11987  * @param[in] items
11988  *   Pointer to the list of items.
11989  * @param[in] actions
11990  *   Pointer to the list of actions.
11991  * @param[out] error
11992  *   Pointer to the error structure.
11993  *
11994  * @return
11995  *   0 on success, a negative errno value otherwise and rte_errno is set.
11996  */
11997 static int
11998 flow_dv_translate(struct rte_eth_dev *dev,
11999                   struct mlx5_flow *dev_flow,
12000                   const struct rte_flow_attr *attr,
12001                   const struct rte_flow_item items[],
12002                   const struct rte_flow_action actions[],
12003                   struct rte_flow_error *error)
12004 {
12005         struct mlx5_priv *priv = dev->data->dev_private;
12006         struct mlx5_dev_config *dev_conf = &priv->config;
12007         struct rte_flow *flow = dev_flow->flow;
12008         struct mlx5_flow_handle *handle = dev_flow->handle;
12009         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12010         struct mlx5_flow_rss_desc *rss_desc;
12011         uint64_t item_flags = 0;
12012         uint64_t last_item = 0;
12013         uint64_t action_flags = 0;
12014         struct mlx5_flow_dv_matcher matcher = {
12015                 .mask = {
12016                         .size = sizeof(matcher.mask.buf) -
12017                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
12018                 },
12019         };
12020         int actions_n = 0;
12021         bool actions_end = false;
12022         union {
12023                 struct mlx5_flow_dv_modify_hdr_resource res;
12024                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12025                             sizeof(struct mlx5_modification_cmd) *
12026                             (MLX5_MAX_MODIFY_NUM + 1)];
12027         } mhdr_dummy;
12028         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12029         const struct rte_flow_action_count *count = NULL;
12030         const struct rte_flow_action_age *non_shared_age = NULL;
12031         union flow_dv_attr flow_attr = { .attr = 0 };
12032         uint32_t tag_be;
12033         union mlx5_flow_tbl_key tbl_key;
12034         uint32_t modify_action_position = UINT32_MAX;
12035         void *match_mask = matcher.mask.buf;
12036         void *match_value = dev_flow->dv.value.buf;
12037         uint8_t next_protocol = 0xff;
12038         struct rte_vlan_hdr vlan = { 0 };
12039         struct mlx5_flow_dv_dest_array_resource mdest_res;
12040         struct mlx5_flow_dv_sample_resource sample_res;
12041         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12042         const struct rte_flow_action_sample *sample = NULL;
12043         struct mlx5_flow_sub_actions_list *sample_act;
12044         uint32_t sample_act_pos = UINT32_MAX;
12045         uint32_t age_act_pos = UINT32_MAX;
12046         uint32_t num_of_dest = 0;
12047         int tmp_actions_n = 0;
12048         uint32_t table;
12049         int ret = 0;
12050         const struct mlx5_flow_tunnel *tunnel = NULL;
12051         struct flow_grp_info grp_info = {
12052                 .external = !!dev_flow->external,
12053                 .transfer = !!attr->transfer,
12054                 .fdb_def_rule = !!priv->fdb_def_rule,
12055                 .skip_scale = dev_flow->skip_scale &
12056                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12057                 .std_tbl_fix = true,
12058         };
12059         const struct rte_flow_item *head_item = items;
12060
12061         if (!wks)
12062                 return rte_flow_error_set(error, ENOMEM,
12063                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12064                                           NULL,
12065                                           "failed to push flow workspace");
12066         rss_desc = &wks->rss_desc;
12067         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12068         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12069         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12070                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12071         /* update normal path action resource into last index of array */
12072         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12073         if (is_tunnel_offload_active(dev)) {
12074                 if (dev_flow->tunnel) {
12075                         RTE_VERIFY(dev_flow->tof_type ==
12076                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12077                         tunnel = dev_flow->tunnel;
12078                 } else {
12079                         tunnel = mlx5_get_tof(items, actions,
12080                                               &dev_flow->tof_type);
12081                         dev_flow->tunnel = tunnel;
12082                 }
12083                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12084                                         (dev, attr, tunnel, dev_flow->tof_type);
12085         }
12086         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12087                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12088         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12089                                        &grp_info, error);
12090         if (ret)
12091                 return ret;
12092         dev_flow->dv.group = table;
12093         if (attr->transfer)
12094                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12095         /* number of actions must be set to 0 in case of dirty stack. */
12096         mhdr_res->actions_num = 0;
12097         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12098                 /*
12099                  * do not add decap action if match rule drops packet
12100                  * HW rejects rules with decap & drop
12101                  *
12102                  * if tunnel match rule was inserted before matching tunnel set
12103                  * rule flow table used in the match rule must be registered.
12104                  * current implementation handles that in the
12105                  * flow_dv_match_register() at the function end.
12106                  */
12107                 bool add_decap = true;
12108                 const struct rte_flow_action *ptr = actions;
12109
12110                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12111                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12112                                 add_decap = false;
12113                                 break;
12114                         }
12115                 }
12116                 if (add_decap) {
12117                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12118                                                            attr->transfer,
12119                                                            error))
12120                                 return -rte_errno;
12121                         dev_flow->dv.actions[actions_n++] =
12122                                         dev_flow->dv.encap_decap->action;
12123                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12124                 }
12125         }
12126         for (; !actions_end ; actions++) {
12127                 const struct rte_flow_action_queue *queue;
12128                 const struct rte_flow_action_rss *rss;
12129                 const struct rte_flow_action *action = actions;
12130                 const uint8_t *rss_key;
12131                 struct mlx5_flow_tbl_resource *tbl;
12132                 struct mlx5_aso_age_action *age_act;
12133                 struct mlx5_flow_counter *cnt_act;
12134                 uint32_t port_id = 0;
12135                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12136                 int action_type = actions->type;
12137                 const struct rte_flow_action *found_action = NULL;
12138                 uint32_t jump_group = 0;
12139                 uint32_t owner_idx;
12140                 struct mlx5_aso_ct_action *ct;
12141
12142                 if (!mlx5_flow_os_action_supported(action_type))
12143                         return rte_flow_error_set(error, ENOTSUP,
12144                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12145                                                   actions,
12146                                                   "action not supported");
12147                 switch (action_type) {
12148                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12149                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12150                         break;
12151                 case RTE_FLOW_ACTION_TYPE_VOID:
12152                         break;
12153                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12154                         if (flow_dv_translate_action_port_id(dev, action,
12155                                                              &port_id, error))
12156                                 return -rte_errno;
12157                         port_id_resource.port_id = port_id;
12158                         MLX5_ASSERT(!handle->rix_port_id_action);
12159                         if (flow_dv_port_id_action_resource_register
12160                             (dev, &port_id_resource, dev_flow, error))
12161                                 return -rte_errno;
12162                         dev_flow->dv.actions[actions_n++] =
12163                                         dev_flow->dv.port_id_action->action;
12164                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12165                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12166                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12167                         num_of_dest++;
12168                         break;
12169                 case RTE_FLOW_ACTION_TYPE_FLAG:
12170                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12171                         dev_flow->handle->mark = 1;
12172                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12173                                 struct rte_flow_action_mark mark = {
12174                                         .id = MLX5_FLOW_MARK_DEFAULT,
12175                                 };
12176
12177                                 if (flow_dv_convert_action_mark(dev, &mark,
12178                                                                 mhdr_res,
12179                                                                 error))
12180                                         return -rte_errno;
12181                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12182                                 break;
12183                         }
12184                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12185                         /*
12186                          * Only one FLAG or MARK is supported per device flow
12187                          * right now. So the pointer to the tag resource must be
12188                          * zero before the register process.
12189                          */
12190                         MLX5_ASSERT(!handle->dvh.rix_tag);
12191                         if (flow_dv_tag_resource_register(dev, tag_be,
12192                                                           dev_flow, error))
12193                                 return -rte_errno;
12194                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12195                         dev_flow->dv.actions[actions_n++] =
12196                                         dev_flow->dv.tag_resource->action;
12197                         break;
12198                 case RTE_FLOW_ACTION_TYPE_MARK:
12199                         action_flags |= MLX5_FLOW_ACTION_MARK;
12200                         dev_flow->handle->mark = 1;
12201                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12202                                 const struct rte_flow_action_mark *mark =
12203                                         (const struct rte_flow_action_mark *)
12204                                                 actions->conf;
12205
12206                                 if (flow_dv_convert_action_mark(dev, mark,
12207                                                                 mhdr_res,
12208                                                                 error))
12209                                         return -rte_errno;
12210                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12211                                 break;
12212                         }
12213                         /* Fall-through */
12214                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12215                         /* Legacy (non-extensive) MARK action. */
12216                         tag_be = mlx5_flow_mark_set
12217                               (((const struct rte_flow_action_mark *)
12218                                (actions->conf))->id);
12219                         MLX5_ASSERT(!handle->dvh.rix_tag);
12220                         if (flow_dv_tag_resource_register(dev, tag_be,
12221                                                           dev_flow, error))
12222                                 return -rte_errno;
12223                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12224                         dev_flow->dv.actions[actions_n++] =
12225                                         dev_flow->dv.tag_resource->action;
12226                         break;
12227                 case RTE_FLOW_ACTION_TYPE_SET_META:
12228                         if (flow_dv_convert_action_set_meta
12229                                 (dev, mhdr_res, attr,
12230                                  (const struct rte_flow_action_set_meta *)
12231                                   actions->conf, error))
12232                                 return -rte_errno;
12233                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12234                         break;
12235                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12236                         if (flow_dv_convert_action_set_tag
12237                                 (dev, mhdr_res,
12238                                  (const struct rte_flow_action_set_tag *)
12239                                   actions->conf, error))
12240                                 return -rte_errno;
12241                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12242                         break;
12243                 case RTE_FLOW_ACTION_TYPE_DROP:
12244                         action_flags |= MLX5_FLOW_ACTION_DROP;
12245                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12246                         break;
12247                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12248                         queue = actions->conf;
12249                         rss_desc->queue_num = 1;
12250                         rss_desc->queue[0] = queue->index;
12251                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12252                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12253                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12254                         num_of_dest++;
12255                         break;
12256                 case RTE_FLOW_ACTION_TYPE_RSS:
12257                         rss = actions->conf;
12258                         memcpy(rss_desc->queue, rss->queue,
12259                                rss->queue_num * sizeof(uint16_t));
12260                         rss_desc->queue_num = rss->queue_num;
12261                         /* NULL RSS key indicates default RSS key. */
12262                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12263                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12264                         /*
12265                          * rss->level and rss.types should be set in advance
12266                          * when expanding items for RSS.
12267                          */
12268                         action_flags |= MLX5_FLOW_ACTION_RSS;
12269                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12270                                 MLX5_FLOW_FATE_SHARED_RSS :
12271                                 MLX5_FLOW_FATE_QUEUE;
12272                         break;
12273                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12274                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12275                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12276                         __atomic_fetch_add(&age_act->refcnt, 1,
12277                                            __ATOMIC_RELAXED);
12278                         age_act_pos = actions_n++;
12279                         action_flags |= MLX5_FLOW_ACTION_AGE;
12280                         break;
12281                 case RTE_FLOW_ACTION_TYPE_AGE:
12282                         non_shared_age = action->conf;
12283                         age_act_pos = actions_n++;
12284                         action_flags |= MLX5_FLOW_ACTION_AGE;
12285                         break;
12286                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12287                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12288                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12289                                                              NULL);
12290                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12291                                            __ATOMIC_RELAXED);
12292                         /* Save information first, will apply later. */
12293                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12294                         break;
12295                 case RTE_FLOW_ACTION_TYPE_COUNT:
12296                         if (!dev_conf->devx) {
12297                                 return rte_flow_error_set
12298                                               (error, ENOTSUP,
12299                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12300                                                NULL,
12301                                                "count action not supported");
12302                         }
12303                         /* Save information first, will apply later. */
12304                         count = action->conf;
12305                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12306                         break;
12307                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12308                         dev_flow->dv.actions[actions_n++] =
12309                                                 priv->sh->pop_vlan_action;
12310                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12311                         break;
12312                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12313                         if (!(action_flags &
12314                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12315                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12316                         vlan.eth_proto = rte_be_to_cpu_16
12317                              ((((const struct rte_flow_action_of_push_vlan *)
12318                                                    actions->conf)->ethertype));
12319                         found_action = mlx5_flow_find_action
12320                                         (actions + 1,
12321                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12322                         if (found_action)
12323                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12324                         found_action = mlx5_flow_find_action
12325                                         (actions + 1,
12326                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12327                         if (found_action)
12328                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12329                         if (flow_dv_create_action_push_vlan
12330                                             (dev, attr, &vlan, dev_flow, error))
12331                                 return -rte_errno;
12332                         dev_flow->dv.actions[actions_n++] =
12333                                         dev_flow->dv.push_vlan_res->action;
12334                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12335                         break;
12336                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12337                         /* of_vlan_push action handled this action */
12338                         MLX5_ASSERT(action_flags &
12339                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12340                         break;
12341                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12342                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12343                                 break;
12344                         flow_dev_get_vlan_info_from_items(items, &vlan);
12345                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12346                         /* If no VLAN push - this is a modify header action */
12347                         if (flow_dv_convert_action_modify_vlan_vid
12348                                                 (mhdr_res, actions, error))
12349                                 return -rte_errno;
12350                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12351                         break;
12352                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12353                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12354                         if (flow_dv_create_action_l2_encap(dev, actions,
12355                                                            dev_flow,
12356                                                            attr->transfer,
12357                                                            error))
12358                                 return -rte_errno;
12359                         dev_flow->dv.actions[actions_n++] =
12360                                         dev_flow->dv.encap_decap->action;
12361                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12362                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12363                                 sample_act->action_flags |=
12364                                                         MLX5_FLOW_ACTION_ENCAP;
12365                         break;
12366                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12367                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12368                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12369                                                            attr->transfer,
12370                                                            error))
12371                                 return -rte_errno;
12372                         dev_flow->dv.actions[actions_n++] =
12373                                         dev_flow->dv.encap_decap->action;
12374                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12375                         break;
12376                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12377                         /* Handle encap with preceding decap. */
12378                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12379                                 if (flow_dv_create_action_raw_encap
12380                                         (dev, actions, dev_flow, attr, error))
12381                                         return -rte_errno;
12382                                 dev_flow->dv.actions[actions_n++] =
12383                                         dev_flow->dv.encap_decap->action;
12384                         } else {
12385                                 /* Handle encap without preceding decap. */
12386                                 if (flow_dv_create_action_l2_encap
12387                                     (dev, actions, dev_flow, attr->transfer,
12388                                      error))
12389                                         return -rte_errno;
12390                                 dev_flow->dv.actions[actions_n++] =
12391                                         dev_flow->dv.encap_decap->action;
12392                         }
12393                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12394                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12395                                 sample_act->action_flags |=
12396                                                         MLX5_FLOW_ACTION_ENCAP;
12397                         break;
12398                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12399                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12400                                 ;
12401                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12402                                 if (flow_dv_create_action_l2_decap
12403                                     (dev, dev_flow, attr->transfer, error))
12404                                         return -rte_errno;
12405                                 dev_flow->dv.actions[actions_n++] =
12406                                         dev_flow->dv.encap_decap->action;
12407                         }
12408                         /* If decap is followed by encap, handle it at encap. */
12409                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12410                         break;
12411                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12412                         dev_flow->dv.actions[actions_n++] =
12413                                 (void *)(uintptr_t)action->conf;
12414                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12415                         break;
12416                 case RTE_FLOW_ACTION_TYPE_JUMP:
12417                         jump_group = ((const struct rte_flow_action_jump *)
12418                                                         action->conf)->group;
12419                         grp_info.std_tbl_fix = 0;
12420                         if (dev_flow->skip_scale &
12421                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12422                                 grp_info.skip_scale = 1;
12423                         else
12424                                 grp_info.skip_scale = 0;
12425                         ret = mlx5_flow_group_to_table(dev, tunnel,
12426                                                        jump_group,
12427                                                        &table,
12428                                                        &grp_info, error);
12429                         if (ret)
12430                                 return ret;
12431                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12432                                                        attr->transfer,
12433                                                        !!dev_flow->external,
12434                                                        tunnel, jump_group, 0,
12435                                                        0, error);
12436                         if (!tbl)
12437                                 return rte_flow_error_set
12438                                                 (error, errno,
12439                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12440                                                  NULL,
12441                                                  "cannot create jump action.");
12442                         if (flow_dv_jump_tbl_resource_register
12443                             (dev, tbl, dev_flow, error)) {
12444                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12445                                 return rte_flow_error_set
12446                                                 (error, errno,
12447                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12448                                                  NULL,
12449                                                  "cannot create jump action.");
12450                         }
12451                         dev_flow->dv.actions[actions_n++] =
12452                                         dev_flow->dv.jump->action;
12453                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12454                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12455                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12456                         num_of_dest++;
12457                         break;
12458                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12459                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12460                         if (flow_dv_convert_action_modify_mac
12461                                         (mhdr_res, actions, error))
12462                                 return -rte_errno;
12463                         action_flags |= actions->type ==
12464                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12465                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12466                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12467                         break;
12468                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12469                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12470                         if (flow_dv_convert_action_modify_ipv4
12471                                         (mhdr_res, actions, error))
12472                                 return -rte_errno;
12473                         action_flags |= actions->type ==
12474                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12475                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12476                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12477                         break;
12478                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12479                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12480                         if (flow_dv_convert_action_modify_ipv6
12481                                         (mhdr_res, actions, error))
12482                                 return -rte_errno;
12483                         action_flags |= actions->type ==
12484                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12485                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12486                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12487                         break;
12488                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12489                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12490                         if (flow_dv_convert_action_modify_tp
12491                                         (mhdr_res, actions, items,
12492                                          &flow_attr, dev_flow, !!(action_flags &
12493                                          MLX5_FLOW_ACTION_DECAP), error))
12494                                 return -rte_errno;
12495                         action_flags |= actions->type ==
12496                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12497                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12498                                         MLX5_FLOW_ACTION_SET_TP_DST;
12499                         break;
12500                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12501                         if (flow_dv_convert_action_modify_dec_ttl
12502                                         (mhdr_res, items, &flow_attr, dev_flow,
12503                                          !!(action_flags &
12504                                          MLX5_FLOW_ACTION_DECAP), error))
12505                                 return -rte_errno;
12506                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12507                         break;
12508                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12509                         if (flow_dv_convert_action_modify_ttl
12510                                         (mhdr_res, actions, items, &flow_attr,
12511                                          dev_flow, !!(action_flags &
12512                                          MLX5_FLOW_ACTION_DECAP), error))
12513                                 return -rte_errno;
12514                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12515                         break;
12516                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12517                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12518                         if (flow_dv_convert_action_modify_tcp_seq
12519                                         (mhdr_res, actions, error))
12520                                 return -rte_errno;
12521                         action_flags |= actions->type ==
12522                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12523                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12524                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12525                         break;
12526
12527                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12528                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12529                         if (flow_dv_convert_action_modify_tcp_ack
12530                                         (mhdr_res, actions, error))
12531                                 return -rte_errno;
12532                         action_flags |= actions->type ==
12533                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12534                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12535                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12536                         break;
12537                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12538                         if (flow_dv_convert_action_set_reg
12539                                         (mhdr_res, actions, error))
12540                                 return -rte_errno;
12541                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12542                         break;
12543                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12544                         if (flow_dv_convert_action_copy_mreg
12545                                         (dev, mhdr_res, actions, error))
12546                                 return -rte_errno;
12547                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12548                         break;
12549                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12550                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12551                         dev_flow->handle->fate_action =
12552                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12553                         break;
12554                 case RTE_FLOW_ACTION_TYPE_METER:
12555                         if (!wks->fm)
12556                                 return rte_flow_error_set(error, rte_errno,
12557                                         RTE_FLOW_ERROR_TYPE_ACTION,
12558                                         NULL, "Failed to get meter in flow.");
12559                         /* Set the meter action. */
12560                         dev_flow->dv.actions[actions_n++] =
12561                                 wks->fm->meter_action;
12562                         action_flags |= MLX5_FLOW_ACTION_METER;
12563                         break;
12564                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12565                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12566                                                               actions, error))
12567                                 return -rte_errno;
12568                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12569                         break;
12570                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12571                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
12572                                                               actions, error))
12573                                 return -rte_errno;
12574                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
12575                         break;
12576                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
12577                         sample_act_pos = actions_n;
12578                         sample = (const struct rte_flow_action_sample *)
12579                                  action->conf;
12580                         actions_n++;
12581                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
12582                         /* put encap action into group if work with port id */
12583                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
12584                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
12585                                 sample_act->action_flags |=
12586                                                         MLX5_FLOW_ACTION_ENCAP;
12587                         break;
12588                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
12589                         if (flow_dv_convert_action_modify_field
12590                                         (dev, mhdr_res, actions, attr, error))
12591                                 return -rte_errno;
12592                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
12593                         break;
12594                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12595                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12596                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
12597                         if (!ct)
12598                                 return rte_flow_error_set(error, EINVAL,
12599                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12600                                                 NULL,
12601                                                 "Failed to get CT object.");
12602                         if (mlx5_aso_ct_available(priv->sh, ct))
12603                                 return rte_flow_error_set(error, rte_errno,
12604                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12605                                                 NULL,
12606                                                 "CT is unavailable.");
12607                         if (ct->is_original)
12608                                 dev_flow->dv.actions[actions_n] =
12609                                                         ct->dr_action_orig;
12610                         else
12611                                 dev_flow->dv.actions[actions_n] =
12612                                                         ct->dr_action_rply;
12613                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
12614                         flow->ct = owner_idx;
12615                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
12616                         actions_n++;
12617                         action_flags |= MLX5_FLOW_ACTION_CT;
12618                         break;
12619                 case RTE_FLOW_ACTION_TYPE_END:
12620                         actions_end = true;
12621                         if (mhdr_res->actions_num) {
12622                                 /* create modify action if needed. */
12623                                 if (flow_dv_modify_hdr_resource_register
12624                                         (dev, mhdr_res, dev_flow, error))
12625                                         return -rte_errno;
12626                                 dev_flow->dv.actions[modify_action_position] =
12627                                         handle->dvh.modify_hdr->action;
12628                         }
12629                         /*
12630                          * Handle AGE and COUNT action by single HW counter
12631                          * when they are not shared.
12632                          */
12633                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
12634                                 if ((non_shared_age &&
12635                                      count && !count->shared) ||
12636                                     !(priv->sh->flow_hit_aso_en &&
12637                                       (attr->group || attr->transfer))) {
12638                                         /* Creates age by counters. */
12639                                         cnt_act = flow_dv_prepare_counter
12640                                                                 (dev, dev_flow,
12641                                                                  flow, count,
12642                                                                  non_shared_age,
12643                                                                  error);
12644                                         if (!cnt_act)
12645                                                 return -rte_errno;
12646                                         dev_flow->dv.actions[age_act_pos] =
12647                                                                 cnt_act->action;
12648                                         break;
12649                                 }
12650                                 if (!flow->age && non_shared_age) {
12651                                         flow->age = flow_dv_aso_age_alloc
12652                                                                 (dev, error);
12653                                         if (!flow->age)
12654                                                 return -rte_errno;
12655                                         flow_dv_aso_age_params_init
12656                                                     (dev, flow->age,
12657                                                      non_shared_age->context ?
12658                                                      non_shared_age->context :
12659                                                      (void *)(uintptr_t)
12660                                                      (dev_flow->flow_idx),
12661                                                      non_shared_age->timeout);
12662                                 }
12663                                 age_act = flow_aso_age_get_by_idx(dev,
12664                                                                   flow->age);
12665                                 dev_flow->dv.actions[age_act_pos] =
12666                                                              age_act->dr_action;
12667                         }
12668                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
12669                                 /*
12670                                  * Create one count action, to be used
12671                                  * by all sub-flows.
12672                                  */
12673                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
12674                                                                   flow, count,
12675                                                                   NULL, error);
12676                                 if (!cnt_act)
12677                                         return -rte_errno;
12678                                 dev_flow->dv.actions[actions_n++] =
12679                                                                 cnt_act->action;
12680                         }
12681                 default:
12682                         break;
12683                 }
12684                 if (mhdr_res->actions_num &&
12685                     modify_action_position == UINT32_MAX)
12686                         modify_action_position = actions_n++;
12687         }
12688         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
12689                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
12690                 int item_type = items->type;
12691
12692                 if (!mlx5_flow_os_item_supported(item_type))
12693                         return rte_flow_error_set(error, ENOTSUP,
12694                                                   RTE_FLOW_ERROR_TYPE_ITEM,
12695                                                   NULL, "item not supported");
12696                 switch (item_type) {
12697                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
12698                         flow_dv_translate_item_port_id
12699                                 (dev, match_mask, match_value, items, attr);
12700                         last_item = MLX5_FLOW_ITEM_PORT_ID;
12701                         break;
12702                 case RTE_FLOW_ITEM_TYPE_ETH:
12703                         flow_dv_translate_item_eth(match_mask, match_value,
12704                                                    items, tunnel,
12705                                                    dev_flow->dv.group);
12706                         matcher.priority = action_flags &
12707                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
12708                                         !dev_flow->external ?
12709                                         MLX5_PRIORITY_MAP_L3 :
12710                                         MLX5_PRIORITY_MAP_L2;
12711                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
12712                                              MLX5_FLOW_LAYER_OUTER_L2;
12713                         break;
12714                 case RTE_FLOW_ITEM_TYPE_VLAN:
12715                         flow_dv_translate_item_vlan(dev_flow,
12716                                                     match_mask, match_value,
12717                                                     items, tunnel,
12718                                                     dev_flow->dv.group);
12719                         matcher.priority = MLX5_PRIORITY_MAP_L2;
12720                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
12721                                               MLX5_FLOW_LAYER_INNER_VLAN) :
12722                                              (MLX5_FLOW_LAYER_OUTER_L2 |
12723                                               MLX5_FLOW_LAYER_OUTER_VLAN);
12724                         break;
12725                 case RTE_FLOW_ITEM_TYPE_IPV4:
12726                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12727                                                   &item_flags, &tunnel);
12728                         flow_dv_translate_item_ipv4(match_mask, match_value,
12729                                                     items, tunnel,
12730                                                     dev_flow->dv.group);
12731                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12732                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
12733                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
12734                         if (items->mask != NULL &&
12735                             ((const struct rte_flow_item_ipv4 *)
12736                              items->mask)->hdr.next_proto_id) {
12737                                 next_protocol =
12738                                         ((const struct rte_flow_item_ipv4 *)
12739                                          (items->spec))->hdr.next_proto_id;
12740                                 next_protocol &=
12741                                         ((const struct rte_flow_item_ipv4 *)
12742                                          (items->mask))->hdr.next_proto_id;
12743                         } else {
12744                                 /* Reset for inner layer. */
12745                                 next_protocol = 0xff;
12746                         }
12747                         break;
12748                 case RTE_FLOW_ITEM_TYPE_IPV6:
12749                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12750                                                   &item_flags, &tunnel);
12751                         flow_dv_translate_item_ipv6(match_mask, match_value,
12752                                                     items, tunnel,
12753                                                     dev_flow->dv.group);
12754                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12755                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
12756                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
12757                         if (items->mask != NULL &&
12758                             ((const struct rte_flow_item_ipv6 *)
12759                              items->mask)->hdr.proto) {
12760                                 next_protocol =
12761                                         ((const struct rte_flow_item_ipv6 *)
12762                                          items->spec)->hdr.proto;
12763                                 next_protocol &=
12764                                         ((const struct rte_flow_item_ipv6 *)
12765                                          items->mask)->hdr.proto;
12766                         } else {
12767                                 /* Reset for inner layer. */
12768                                 next_protocol = 0xff;
12769                         }
12770                         break;
12771                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
12772                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
12773                                                              match_value,
12774                                                              items, tunnel);
12775                         last_item = tunnel ?
12776                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
12777                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
12778                         if (items->mask != NULL &&
12779                             ((const struct rte_flow_item_ipv6_frag_ext *)
12780                              items->mask)->hdr.next_header) {
12781                                 next_protocol =
12782                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12783                                  items->spec)->hdr.next_header;
12784                                 next_protocol &=
12785                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12786                                  items->mask)->hdr.next_header;
12787                         } else {
12788                                 /* Reset for inner layer. */
12789                                 next_protocol = 0xff;
12790                         }
12791                         break;
12792                 case RTE_FLOW_ITEM_TYPE_TCP:
12793                         flow_dv_translate_item_tcp(match_mask, match_value,
12794                                                    items, tunnel);
12795                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12796                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
12797                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
12798                         break;
12799                 case RTE_FLOW_ITEM_TYPE_UDP:
12800                         flow_dv_translate_item_udp(match_mask, match_value,
12801                                                    items, tunnel);
12802                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12803                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
12804                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
12805                         break;
12806                 case RTE_FLOW_ITEM_TYPE_GRE:
12807                         flow_dv_translate_item_gre(match_mask, match_value,
12808                                                    items, tunnel);
12809                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12810                         last_item = MLX5_FLOW_LAYER_GRE;
12811                         break;
12812                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
12813                         flow_dv_translate_item_gre_key(match_mask,
12814                                                        match_value, items);
12815                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
12816                         break;
12817                 case RTE_FLOW_ITEM_TYPE_NVGRE:
12818                         flow_dv_translate_item_nvgre(match_mask, match_value,
12819                                                      items, tunnel);
12820                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12821                         last_item = MLX5_FLOW_LAYER_GRE;
12822                         break;
12823                 case RTE_FLOW_ITEM_TYPE_VXLAN:
12824                         flow_dv_translate_item_vxlan(match_mask, match_value,
12825                                                      items, tunnel);
12826                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12827                         last_item = MLX5_FLOW_LAYER_VXLAN;
12828                         break;
12829                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
12830                         flow_dv_translate_item_vxlan_gpe(match_mask,
12831                                                          match_value, items,
12832                                                          tunnel);
12833                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12834                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
12835                         break;
12836                 case RTE_FLOW_ITEM_TYPE_GENEVE:
12837                         flow_dv_translate_item_geneve(match_mask, match_value,
12838                                                       items, tunnel);
12839                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12840                         last_item = MLX5_FLOW_LAYER_GENEVE;
12841                         break;
12842                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
12843                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
12844                                                           match_value,
12845                                                           items, error);
12846                         if (ret)
12847                                 return rte_flow_error_set(error, -ret,
12848                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12849                                         "cannot create GENEVE TLV option");
12850                         flow->geneve_tlv_option = 1;
12851                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
12852                         break;
12853                 case RTE_FLOW_ITEM_TYPE_MPLS:
12854                         flow_dv_translate_item_mpls(match_mask, match_value,
12855                                                     items, last_item, tunnel);
12856                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12857                         last_item = MLX5_FLOW_LAYER_MPLS;
12858                         break;
12859                 case RTE_FLOW_ITEM_TYPE_MARK:
12860                         flow_dv_translate_item_mark(dev, match_mask,
12861                                                     match_value, items);
12862                         last_item = MLX5_FLOW_ITEM_MARK;
12863                         break;
12864                 case RTE_FLOW_ITEM_TYPE_META:
12865                         flow_dv_translate_item_meta(dev, match_mask,
12866                                                     match_value, attr, items);
12867                         last_item = MLX5_FLOW_ITEM_METADATA;
12868                         break;
12869                 case RTE_FLOW_ITEM_TYPE_ICMP:
12870                         flow_dv_translate_item_icmp(match_mask, match_value,
12871                                                     items, tunnel);
12872                         last_item = MLX5_FLOW_LAYER_ICMP;
12873                         break;
12874                 case RTE_FLOW_ITEM_TYPE_ICMP6:
12875                         flow_dv_translate_item_icmp6(match_mask, match_value,
12876                                                       items, tunnel);
12877                         last_item = MLX5_FLOW_LAYER_ICMP6;
12878                         break;
12879                 case RTE_FLOW_ITEM_TYPE_TAG:
12880                         flow_dv_translate_item_tag(dev, match_mask,
12881                                                    match_value, items);
12882                         last_item = MLX5_FLOW_ITEM_TAG;
12883                         break;
12884                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
12885                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
12886                                                         match_value, items);
12887                         last_item = MLX5_FLOW_ITEM_TAG;
12888                         break;
12889                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
12890                         flow_dv_translate_item_tx_queue(dev, match_mask,
12891                                                         match_value,
12892                                                         items);
12893                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
12894                         break;
12895                 case RTE_FLOW_ITEM_TYPE_GTP:
12896                         flow_dv_translate_item_gtp(match_mask, match_value,
12897                                                    items, tunnel);
12898                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12899                         last_item = MLX5_FLOW_LAYER_GTP;
12900                         break;
12901                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
12902                         ret = flow_dv_translate_item_gtp_psc(match_mask,
12903                                                           match_value,
12904                                                           items);
12905                         if (ret)
12906                                 return rte_flow_error_set(error, -ret,
12907                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12908                                         "cannot create GTP PSC item");
12909                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
12910                         break;
12911                 case RTE_FLOW_ITEM_TYPE_ECPRI:
12912                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
12913                                 /* Create it only the first time to be used. */
12914                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12915                                 if (ret)
12916                                         return rte_flow_error_set
12917                                                 (error, -ret,
12918                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12919                                                 NULL,
12920                                                 "cannot create eCPRI parser");
12921                         }
12922                         /* Adjust the length matcher and device flow value. */
12923                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12924                         dev_flow->dv.value.size =
12925                                         MLX5_ST_SZ_BYTES(fte_match_param);
12926                         flow_dv_translate_item_ecpri(dev, match_mask,
12927                                                      match_value, items);
12928                         /* No other protocol should follow eCPRI layer. */
12929                         last_item = MLX5_FLOW_LAYER_ECPRI;
12930                         break;
12931                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
12932                         flow_dv_translate_item_integrity(match_mask,
12933                                                          match_value,
12934                                                          head_item, items);
12935                         break;
12936                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
12937                         flow_dv_translate_item_aso_ct(dev, match_mask,
12938                                                       match_value, items);
12939                         break;
12940                 default:
12941                         break;
12942                 }
12943                 item_flags |= last_item;
12944         }
12945         /*
12946          * When E-Switch mode is enabled, we have two cases where we need to
12947          * set the source port manually.
12948          * The first one, is in case of Nic steering rule, and the second is
12949          * E-Switch rule where no port_id item was found. In both cases
12950          * the source port is set according the current port in use.
12951          */
12952         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
12953             (priv->representor || priv->master)) {
12954                 if (flow_dv_translate_item_port_id(dev, match_mask,
12955                                                    match_value, NULL, attr))
12956                         return -rte_errno;
12957         }
12958 #ifdef RTE_LIBRTE_MLX5_DEBUG
12959         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
12960                                               dev_flow->dv.value.buf));
12961 #endif
12962         /*
12963          * Layers may be already initialized from prefix flow if this dev_flow
12964          * is the suffix flow.
12965          */
12966         handle->layers |= item_flags;
12967         if (action_flags & MLX5_FLOW_ACTION_RSS)
12968                 flow_dv_hashfields_set(dev_flow, rss_desc);
12969         /* If has RSS action in the sample action, the Sample/Mirror resource
12970          * should be registered after the hash filed be update.
12971          */
12972         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
12973                 ret = flow_dv_translate_action_sample(dev,
12974                                                       sample,
12975                                                       dev_flow, attr,
12976                                                       &num_of_dest,
12977                                                       sample_actions,
12978                                                       &sample_res,
12979                                                       error);
12980                 if (ret < 0)
12981                         return ret;
12982                 ret = flow_dv_create_action_sample(dev,
12983                                                    dev_flow,
12984                                                    num_of_dest,
12985                                                    &sample_res,
12986                                                    &mdest_res,
12987                                                    sample_actions,
12988                                                    action_flags,
12989                                                    error);
12990                 if (ret < 0)
12991                         return rte_flow_error_set
12992                                                 (error, rte_errno,
12993                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12994                                                 NULL,
12995                                                 "cannot create sample action");
12996                 if (num_of_dest > 1) {
12997                         dev_flow->dv.actions[sample_act_pos] =
12998                         dev_flow->dv.dest_array_res->action;
12999                 } else {
13000                         dev_flow->dv.actions[sample_act_pos] =
13001                         dev_flow->dv.sample_res->verbs_action;
13002                 }
13003         }
13004         /*
13005          * For multiple destination (sample action with ratio=1), the encap
13006          * action and port id action will be combined into group action.
13007          * So need remove the original these actions in the flow and only
13008          * use the sample action instead of.
13009          */
13010         if (num_of_dest > 1 &&
13011             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13012                 int i;
13013                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13014
13015                 for (i = 0; i < actions_n; i++) {
13016                         if ((sample_act->dr_encap_action &&
13017                                 sample_act->dr_encap_action ==
13018                                 dev_flow->dv.actions[i]) ||
13019                                 (sample_act->dr_port_id_action &&
13020                                 sample_act->dr_port_id_action ==
13021                                 dev_flow->dv.actions[i]) ||
13022                                 (sample_act->dr_jump_action &&
13023                                 sample_act->dr_jump_action ==
13024                                 dev_flow->dv.actions[i]))
13025                                 continue;
13026                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13027                 }
13028                 memcpy((void *)dev_flow->dv.actions,
13029                                 (void *)temp_actions,
13030                                 tmp_actions_n * sizeof(void *));
13031                 actions_n = tmp_actions_n;
13032         }
13033         dev_flow->dv.actions_n = actions_n;
13034         dev_flow->act_flags = action_flags;
13035         if (wks->skip_matcher_reg)
13036                 return 0;
13037         /* Register matcher. */
13038         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13039                                     matcher.mask.size);
13040         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13041                                         matcher.priority);
13042         /* reserved field no needs to be set to 0 here. */
13043         tbl_key.is_fdb = attr->transfer;
13044         tbl_key.is_egress = attr->egress;
13045         tbl_key.level = dev_flow->dv.group;
13046         tbl_key.id = dev_flow->dv.table_id;
13047         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13048                                      tunnel, attr->group, error))
13049                 return -rte_errno;
13050         return 0;
13051 }
13052
/**
 * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
 * and tunnel.
 *
 * @param[in, out] action
 *   Shared RSS action holding hash RX queue objects.
 * @param[in] hash_fields
 *   Defines combination of packet fields to participate in RX hash.
 *   The IBV_RX_HASH_INNER (tunnel) bit is ignored.
 * @param[in] hrxq_idx
 *   Hash RX queue index to set.
 *
 * @return
 *   0 on success, -1 if the hash fields combination is not supported.
 */
13069 static int
13070 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13071                               const uint64_t hash_fields,
13072                               uint32_t hrxq_idx)
13073 {
13074         uint32_t *hrxqs = action->hrxq;
13075
13076         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13077         case MLX5_RSS_HASH_IPV4:
13078                 /* fall-through. */
13079         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13080                 /* fall-through. */
13081         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13082                 hrxqs[0] = hrxq_idx;
13083                 return 0;
13084         case MLX5_RSS_HASH_IPV4_TCP:
13085                 /* fall-through. */
13086         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13087                 /* fall-through. */
13088         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13089                 hrxqs[1] = hrxq_idx;
13090                 return 0;
13091         case MLX5_RSS_HASH_IPV4_UDP:
13092                 /* fall-through. */
13093         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13094                 /* fall-through. */
13095         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13096                 hrxqs[2] = hrxq_idx;
13097                 return 0;
13098         case MLX5_RSS_HASH_IPV6:
13099                 /* fall-through. */
13100         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13101                 /* fall-through. */
13102         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13103                 hrxqs[3] = hrxq_idx;
13104                 return 0;
13105         case MLX5_RSS_HASH_IPV6_TCP:
13106                 /* fall-through. */
13107         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13108                 /* fall-through. */
13109         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13110                 hrxqs[4] = hrxq_idx;
13111                 return 0;
13112         case MLX5_RSS_HASH_IPV6_UDP:
13113                 /* fall-through. */
13114         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13115                 /* fall-through. */
13116         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13117                 hrxqs[5] = hrxq_idx;
13118                 return 0;
13119         case MLX5_RSS_HASH_NONE:
13120                 hrxqs[6] = hrxq_idx;
13121                 return 0;
13122         default:
13123                 return -1;
13124         }
13125 }
13126
13127 /**
13128  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13129  * and tunnel.
13130  *
13131  * @param[in] dev
13132  *   Pointer to the Ethernet device structure.
13133  * @param[in] idx
13134  *   Shared RSS action ID holding hash RX queue objects.
13135  * @param[in] hash_fields
13136  *   Defines combination of packet fields to participate in RX hash.
13137  * @param[in] tunnel
13138  *   Tunnel type
13139  *
13140  * @return
13141  *   Valid hash RX queue index, otherwise 0.
13142  */
13143 static uint32_t
13144 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13145                                  const uint64_t hash_fields)
13146 {
13147         struct mlx5_priv *priv = dev->data->dev_private;
13148         struct mlx5_shared_action_rss *shared_rss =
13149             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13150         const uint32_t *hrxqs = shared_rss->hrxq;
13151
13152         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13153         case MLX5_RSS_HASH_IPV4:
13154                 /* fall-through. */
13155         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13156                 /* fall-through. */
13157         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13158                 return hrxqs[0];
13159         case MLX5_RSS_HASH_IPV4_TCP:
13160                 /* fall-through. */
13161         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13162                 /* fall-through. */
13163         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13164                 return hrxqs[1];
13165         case MLX5_RSS_HASH_IPV4_UDP:
13166                 /* fall-through. */
13167         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13168                 /* fall-through. */
13169         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13170                 return hrxqs[2];
13171         case MLX5_RSS_HASH_IPV6:
13172                 /* fall-through. */
13173         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13174                 /* fall-through. */
13175         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13176                 return hrxqs[3];
13177         case MLX5_RSS_HASH_IPV6_TCP:
13178                 /* fall-through. */
13179         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13180                 /* fall-through. */
13181         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13182                 return hrxqs[4];
13183         case MLX5_RSS_HASH_IPV6_UDP:
13184                 /* fall-through. */
13185         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13186                 /* fall-through. */
13187         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13188                 return hrxqs[5];
13189         case MLX5_RSS_HASH_NONE:
13190                 return hrxqs[6];
13191         default:
13192                 return 0;
13193         }
13194
13195 }
13196
/**
 * Apply the flow to the NIC, lock free,
 * (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv_workspace *dv;
	struct mlx5_flow_handle *dh;
	struct mlx5_flow_handle_dv *dv_h;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	int n;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

	MLX5_ASSERT(wks);
	/* Create a HW rule for every device sub-flow in the workspace. */
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		dv = &dev_flow->dv;
		dh = dev_flow->handle;
		dv_h = &dh->dvh;
		n = dv->actions_n;
		/* Append the fate action after the translated actions. */
		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
			if (dv->transfer) {
				MLX5_ASSERT(priv->sh->dr_drop_action);
				dv->actions[n++] = priv->sh->dr_drop_action;
			} else {
#ifdef HAVE_MLX5DV_DR
				/* DR supports drop action placeholder. */
				MLX5_ASSERT(priv->sh->dr_drop_action);
				dv->actions[n++] = priv->sh->dr_drop_action;
#else
				/* For DV we use the explicit drop queue. */
				MLX5_ASSERT(priv->drop_queue.hrxq);
				dv->actions[n++] =
						priv->drop_queue.hrxq->action;
#endif
			}
		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
			   !dv_h->rix_sample && !dv_h->rix_dest_array)) {
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			/* Queue fate: take a hash RX queue reference. */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
						    &hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dh->rix_hrxq = hrxq_idx;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
			struct mlx5_hrxq *hrxq = NULL;
			uint32_t hrxq_idx;

			/* Shared RSS: reuse the pre-created hash RX queue. */
			hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
						rss_desc->shared_rss,
						dev_flow->hash_fields);
			if (hrxq_idx)
				hrxq = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					 hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dh->rix_srss = rss_desc->shared_rss;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
			if (!priv->sh->default_miss_action) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "default miss action not be created.");
				goto error;
			}
			dv->actions[n++] = priv->sh->default_miss_action;
		}
		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
					       (void *)&dv->value, n,
					       dv->actions, &dh->drv_flow);
		if (err) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Roll back the references taken by the handles applied so far. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dh, next) {
		/* hrxq is union, don't clear it if the flag is not set. */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
			mlx5_hrxq_release(dev, dh->rix_hrxq);
			dh->rix_hrxq = 0;
		} else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
			dh->rix_srss = 0;
		}
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
13335
13336 void
13337 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
13338                           struct mlx5_cache_entry *entry)
13339 {
13340         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
13341                                                           entry);
13342
13343         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
13344         mlx5_free(cache);
13345 }
13346
/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param handle
 *   Pointer to the flow handle holding the matcher reference.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
			struct mlx5_flow_handle *handle)
{
	struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
	struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
							    typeof(*tbl), tbl);
	int ret;

	MLX5_ASSERT(matcher->matcher_object);
	ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
	/* Also drop the table reference the matcher was holding. */
	flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
	return ret;
}
13372
13373 /**
13374  * Release encap_decap resource.
13375  *
13376  * @param list
13377  *   Pointer to the hash list.
13378  * @param entry
13379  *   Pointer to exist resource entry object.
13380  */
13381 void
13382 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
13383                               struct mlx5_hlist_entry *entry)
13384 {
13385         struct mlx5_dev_ctx_shared *sh = list->ctx;
13386         struct mlx5_flow_dv_encap_decap_resource *res =
13387                 container_of(entry, typeof(*res), entry);
13388
13389         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13390         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13391 }
13392
13393 /**
13394  * Release an encap/decap resource.
13395  *
13396  * @param dev
13397  *   Pointer to Ethernet device.
13398  * @param encap_decap_idx
13399  *   Index of encap decap resource.
13400  *
13401  * @return
13402  *   1 while a reference on it exists, 0 when freed.
13403  */
13404 static int
13405 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13406                                      uint32_t encap_decap_idx)
13407 {
13408         struct mlx5_priv *priv = dev->data->dev_private;
13409         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
13410
13411         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13412                                         encap_decap_idx);
13413         if (!cache_resource)
13414                 return 0;
13415         MLX5_ASSERT(cache_resource->action);
13416         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
13417                                      &cache_resource->entry);
13418 }
13419
13420 /**
13421  * Release an jump to table action resource.
13422  *
13423  * @param dev
13424  *   Pointer to Ethernet device.
13425  * @param rix_jump
13426  *   Index to the jump action resource.
13427  *
13428  * @return
13429  *   1 while a reference on it exists, 0 when freed.
13430  */
13431 static int
13432 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13433                                   uint32_t rix_jump)
13434 {
13435         struct mlx5_priv *priv = dev->data->dev_private;
13436         struct mlx5_flow_tbl_data_entry *tbl_data;
13437
13438         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13439                                   rix_jump);
13440         if (!tbl_data)
13441                 return 0;
13442         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13443 }
13444
13445 void
13446 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
13447                          struct mlx5_hlist_entry *entry)
13448 {
13449         struct mlx5_flow_dv_modify_hdr_resource *res =
13450                 container_of(entry, typeof(*res), entry);
13451
13452         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13453         mlx5_free(entry);
13454 }
13455
13456 /**
13457  * Release a modify-header resource.
13458  *
13459  * @param dev
13460  *   Pointer to Ethernet device.
13461  * @param handle
13462  *   Pointer to mlx5_flow_handle.
13463  *
13464  * @return
13465  *   1 while a reference on it exists, 0 when freed.
13466  */
13467 static int
13468 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13469                                     struct mlx5_flow_handle *handle)
13470 {
13471         struct mlx5_priv *priv = dev->data->dev_private;
13472         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13473
13474         MLX5_ASSERT(entry->action);
13475         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13476 }
13477
13478 void
13479 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
13480                           struct mlx5_cache_entry *entry)
13481 {
13482         struct mlx5_dev_ctx_shared *sh = list->ctx;
13483         struct mlx5_flow_dv_port_id_action_resource *cache =
13484                         container_of(entry, typeof(*cache), entry);
13485
13486         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13487         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
13488 }
13489
/**
 * Release port ID action resource.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param port_id
 *   Index to the port ID action resource.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_port_id_action_resource *cache;

	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
	/* Stale index - nothing to release. */
	if (!cache)
		return 0;
	MLX5_ASSERT(cache->action);
	return mlx5_cache_unregister(&priv->sh->port_id_action_list,
				     &cache->entry);
}
13515
13516 /**
13517  * Release shared RSS action resource.
13518  *
13519  * @param dev
13520  *   Pointer to Ethernet device.
13521  * @param srss
13522  *   Shared RSS action index.
13523  */
13524 static void
13525 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13526 {
13527         struct mlx5_priv *priv = dev->data->dev_private;
13528         struct mlx5_shared_action_rss *shared_rss;
13529
13530         shared_rss = mlx5_ipool_get
13531                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13532         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13533 }
13534
13535 void
13536 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
13537                             struct mlx5_cache_entry *entry)
13538 {
13539         struct mlx5_dev_ctx_shared *sh = list->ctx;
13540         struct mlx5_flow_dv_push_vlan_action_resource *cache =
13541                         container_of(entry, typeof(*cache), entry);
13542
13543         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13544         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
13545 }
13546
13547 /**
13548  * Release push vlan action resource.
13549  *
13550  * @param dev
13551  *   Pointer to Ethernet device.
13552  * @param handle
13553  *   Pointer to mlx5_flow_handle.
13554  *
13555  * @return
13556  *   1 while a reference on it exists, 0 when freed.
13557  */
13558 static int
13559 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13560                                           struct mlx5_flow_handle *handle)
13561 {
13562         struct mlx5_priv *priv = dev->data->dev_private;
13563         struct mlx5_flow_dv_push_vlan_action_resource *cache;
13564         uint32_t idx = handle->dvh.rix_push_vlan;
13565
13566         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13567         if (!cache)
13568                 return 0;
13569         MLX5_ASSERT(cache->action);
13570         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
13571                                      &cache->entry);
13572 }
13573
/**
 * Release the fate resource.
 *
 * NOTE(review): rix_fate appears to alias the per-fate rix_* fields in the
 * handle (compare the "hrxq is union" note in flow_dv_apply's error path) -
 * confirm against the mlx5_flow_handle definition.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param handle
 *   Pointer to mlx5_flow_handle.
 */
static void
flow_dv_fate_resource_release(struct rte_eth_dev *dev,
			       struct mlx5_flow_handle *handle)
{
	/* No fate resource attached - nothing to do. */
	if (!handle->rix_fate)
		return;
	switch (handle->fate_action) {
	case MLX5_FLOW_FATE_QUEUE:
		/* Sample/dest-array sub-flows manage their own queue refs. */
		if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
			mlx5_hrxq_release(dev, handle->rix_hrxq);
		break;
	case MLX5_FLOW_FATE_JUMP:
		flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
		break;
	case MLX5_FLOW_FATE_PORT_ID:
		flow_dv_port_id_action_resource_release(dev,
				handle->rix_port_id_action);
		break;
	default:
		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
		break;
	}
	/* Mark the fate resource as released. */
	handle->rix_fate = 0;
}
13606
13607 void
13608 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
13609                          struct mlx5_cache_entry *entry)
13610 {
13611         struct mlx5_flow_dv_sample_resource *cache_resource =
13612                         container_of(entry, typeof(*cache_resource), entry);
13613         struct rte_eth_dev *dev = cache_resource->dev;
13614         struct mlx5_priv *priv = dev->data->dev_private;
13615
13616         if (cache_resource->verbs_action)
13617                 claim_zero(mlx5_flow_os_destroy_flow_action
13618                                 (cache_resource->verbs_action));
13619         if (cache_resource->normal_path_tbl)
13620                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13621                         cache_resource->normal_path_tbl);
13622         flow_dv_sample_sub_actions_release(dev,
13623                                 &cache_resource->sample_idx);
13624         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13625                         cache_resource->idx);
13626         DRV_LOG(DEBUG, "sample resource %p: removed",
13627                 (void *)cache_resource);
13628 }
13629
13630 /**
13631  * Release an sample resource.
13632  *
13633  * @param dev
13634  *   Pointer to Ethernet device.
13635  * @param handle
13636  *   Pointer to mlx5_flow_handle.
13637  *
13638  * @return
13639  *   1 while a reference on it exists, 0 when freed.
13640  */
13641 static int
13642 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
13643                                      struct mlx5_flow_handle *handle)
13644 {
13645         struct mlx5_priv *priv = dev->data->dev_private;
13646         struct mlx5_flow_dv_sample_resource *cache_resource;
13647
13648         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13649                          handle->dvh.rix_sample);
13650         if (!cache_resource)
13651                 return 0;
13652         MLX5_ASSERT(cache_resource->verbs_action);
13653         return mlx5_cache_unregister(&priv->sh->sample_action_list,
13654                                      &cache_resource->entry);
13655 }
13656
13657 void
13658 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
13659                              struct mlx5_cache_entry *entry)
13660 {
13661         struct mlx5_flow_dv_dest_array_resource *cache_resource =
13662                         container_of(entry, typeof(*cache_resource), entry);
13663         struct rte_eth_dev *dev = cache_resource->dev;
13664         struct mlx5_priv *priv = dev->data->dev_private;
13665         uint32_t i = 0;
13666
13667         MLX5_ASSERT(cache_resource->action);
13668         if (cache_resource->action)
13669                 claim_zero(mlx5_flow_os_destroy_flow_action
13670                                         (cache_resource->action));
13671         for (; i < cache_resource->num_of_dest; i++)
13672                 flow_dv_sample_sub_actions_release(dev,
13673                                 &cache_resource->sample_idx[i]);
13674         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13675                         cache_resource->idx);
13676         DRV_LOG(DEBUG, "destination array resource %p: removed",
13677                 (void *)cache_resource);
13678 }
13679
13680 /**
13681  * Release an destination array resource.
13682  *
13683  * @param dev
13684  *   Pointer to Ethernet device.
13685  * @param handle
13686  *   Pointer to mlx5_flow_handle.
13687  *
13688  * @return
13689  *   1 while a reference on it exists, 0 when freed.
13690  */
13691 static int
13692 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
13693                                     struct mlx5_flow_handle *handle)
13694 {
13695         struct mlx5_priv *priv = dev->data->dev_private;
13696         struct mlx5_flow_dv_dest_array_resource *cache;
13697
13698         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13699                                handle->dvh.rix_dest_array);
13700         if (!cache)
13701                 return 0;
13702         MLX5_ASSERT(cache->action);
13703         return mlx5_cache_unregister(&priv->sh->dest_array_list,
13704                                      &cache->entry);
13705 }
13706
13707 static void
13708 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
13709 {
13710         struct mlx5_priv *priv = dev->data->dev_private;
13711         struct mlx5_dev_ctx_shared *sh = priv->sh;
13712         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
13713                                 sh->geneve_tlv_option_resource;
13714         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
13715         if (geneve_opt_resource) {
13716                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
13717                                          __ATOMIC_RELAXED))) {
13718                         claim_zero(mlx5_devx_cmd_destroy
13719                                         (geneve_opt_resource->obj));
13720                         mlx5_free(sh->geneve_tlv_option_resource);
13721                         sh->geneve_tlv_option_resource = NULL;
13722                 }
13723         }
13724         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
13725 }
13726
/**
 * Remove the flow from the NIC but keeps it in memory.
 * Lock free, (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_handle *dh;
	uint32_t handle_idx;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!flow)
		return;
	handle_idx = flow->dev_handles;
	/* Destroy the HW rule of each handle; SW state stays allocated. */
	while (handle_idx) {
		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				    handle_idx);
		if (!dh)
			return;
		if (dh->drv_flow) {
			claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
			dh->drv_flow = NULL;
		}
		/* Queue fate holds a hash RX queue reference - drop it now. */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
			flow_dv_fate_resource_release(dev, dh);
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
		handle_idx = dh->next.next;
	}
}
13762
/**
 * Remove the flow from the NIC and the memory.
 * Lock free, (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_info *fm = NULL;
	uint32_t srss = 0;

	if (!flow)
		return;
	/* Detach the flow from HW before releasing SW resources. */
	flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_free(dev, flow->counter);
		flow->counter = 0;
	}
	if (flow->meter) {
		fm = flow_dv_meter_find_by_idx(priv, flow->meter);
		if (fm)
			mlx5_flow_meter_detach(priv, fm);
		flow->meter = 0;
	}
	/* Keep the current age handling by default. */
	if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
		flow_dv_aso_ct_release(dev, flow->ct);
	else if (flow->age)
		flow_dv_aso_age_release(dev, flow->age);
	if (flow->geneve_tlv_option) {
		flow_dv_geneve_tlv_option_resource_release(dev);
		flow->geneve_tlv_option = 0;
	}
	/* Release and free every device handle attached to the flow. */
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		dev_handle = mlx5_ipool_get(priv->sh->ipool
					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
		if (!dev_handle)
			return;
		flow->dev_handles = dev_handle->next.next;
		if (dev_handle->dvh.matcher)
			flow_dv_matcher_release(dev, dev_handle);
		if (dev_handle->dvh.rix_sample)
			flow_dv_sample_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_dest_array)
			flow_dv_dest_array_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_encap_decap)
			flow_dv_encap_decap_resource_release(dev,
				dev_handle->dvh.rix_encap_decap);
		if (dev_handle->dvh.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_push_vlan)
			flow_dv_push_vlan_action_resource_release(dev,
								  dev_handle);
		if (dev_handle->dvh.rix_tag)
			flow_dv_tag_release(dev,
					    dev_handle->dvh.rix_tag);
		/* Shared RSS release is deferred until all handles freed. */
		if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
			flow_dv_fate_resource_release(dev, dev_handle);
		else if (!srss)
			srss = dev_handle->rix_srss;
		if (fm && dev_handle->is_meter_flow_id &&
		    dev_handle->split_flow_id)
			mlx5_ipool_free(fm->flow_ipool,
					dev_handle->split_flow_id);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			   tmp_idx);
	}
	if (srss)
		flow_dv_shared_rss_action_release(dev, srss);
}
13841
13842 /**
13843  * Release array of hash RX queue objects.
13844  * Helper function.
13845  *
13846  * @param[in] dev
13847  *   Pointer to the Ethernet device structure.
13848  * @param[in, out] hrxqs
13849  *   Array of hash RX queue objects.
13850  *
13851  * @return
13852  *   Total number of references to hash RX queue objects in *hrxqs* array
13853  *   after this operation.
13854  */
13855 static int
13856 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
13857                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
13858 {
13859         size_t i;
13860         int remaining = 0;
13861
13862         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
13863                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
13864
13865                 if (!ret)
13866                         (*hrxqs)[i] = 0;
13867                 remaining += ret;
13868         }
13869         return remaining;
13870 }
13871
13872 /**
13873  * Release all hash RX queue objects representing shared RSS action.
13874  *
13875  * @param[in] dev
13876  *   Pointer to the Ethernet device structure.
13877  * @param[in, out] action
13878  *   Shared RSS action to remove hash RX queue objects from.
13879  *
13880  * @return
13881  *   Total number of references to hash RX queue objects stored in *action*
13882  *   after this operation.
13883  *   Expected to be 0 if no external references held.
13884  */
13885 static int
13886 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
13887                                  struct mlx5_shared_action_rss *shared_rss)
13888 {
13889         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
13890 }
13891
13892 /**
13893  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
13894  * user input.
13895  *
13896  * Only one hash value is available for one L3+L4 combination:
13897  * for example:
13898  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
13899  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
13900  * same slot in mlx5_rss_hash_fields.
13901  *
13902  * @param[in] rss
13903  *   Pointer to the shared action RSS conf.
13904  * @param[in, out] hash_field
13905  *   hash_field variable needed to be adjusted.
13906  *
13907  * @return
13908  *   void
13909  */
13910 static void
13911 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
13912                                      uint64_t *hash_field)
13913 {
13914         uint64_t rss_types = rss->origin.types;
13915
13916         switch (*hash_field & ~IBV_RX_HASH_INNER) {
13917         case MLX5_RSS_HASH_IPV4:
13918                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
13919                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
13920                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13921                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
13922                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13923                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
13924                         else
13925                                 *hash_field |= MLX5_RSS_HASH_IPV4;
13926                 }
13927                 return;
13928         case MLX5_RSS_HASH_IPV6:
13929                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13930                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13931                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13932                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13933                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13934                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
13935                         else
13936                                 *hash_field |= MLX5_RSS_HASH_IPV6;
13937                 }
13938                 return;
13939         case MLX5_RSS_HASH_IPV4_UDP:
13940                 /* fall-through. */
13941         case MLX5_RSS_HASH_IPV6_UDP:
13942                 if (rss_types & ETH_RSS_UDP) {
13943                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
13944                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13945                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
13946                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13947                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
13948                         else
13949                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
13950                 }
13951                 return;
13952         case MLX5_RSS_HASH_IPV4_TCP:
13953                 /* fall-through. */
13954         case MLX5_RSS_HASH_IPV6_TCP:
13955                 if (rss_types & ETH_RSS_TCP) {
13956                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
13957                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13958                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
13959                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13960                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
13961                         else
13962                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
13963                 }
13964                 return;
13965         default:
13966                 return;
13967         }
13968 }
13969
13970 /**
13971  * Setup shared RSS action.
13972  * Prepare set of hash RX queue objects sufficient to handle all valid
13973  * hash_fields combinations (see enum ibv_rx_hash_fields).
13974  *
13975  * @param[in] dev
13976  *   Pointer to the Ethernet device structure.
13977  * @param[in] action_idx
13978  *   Shared RSS action ipool index.
13979  * @param[in, out] action
13980  *   Partially initialized shared RSS action.
13981  * @param[out] error
13982  *   Perform verbose error reporting if not NULL. Initialized in case of
13983  *   error only.
13984  *
13985  * @return
13986  *   0 on success, otherwise negative errno value.
13987  */
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
			   uint32_t action_idx,
			   struct mlx5_shared_action_rss *shared_rss,
			   struct rte_flow_error *error)
{
	struct mlx5_flow_rss_desc rss_desc = { 0 };
	size_t i;
	int err;

	/* Materialize the indirection table first; hrxqs hang off it. */
	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot setup indirection table");
	}
	memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
	rss_desc.const_q = shared_rss->origin.queue;
	rss_desc.queue_num = shared_rss->origin.queue_num;
	/* Set non-zero value to indicate a shared RSS. */
	rss_desc.shared_rss = action_idx;
	rss_desc.ind_tbl = shared_rss->ind_tbl;
	/* Pre-create one hrxq per supported hash-fields combination. */
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		uint32_t hrxq_idx;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel = 0;

		/* Narrow this slot's hash fields per the user's RSS types. */
		__flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
		if (shared_rss->origin.level > 1) {
			/* Level > 1 requests hashing on inner headers. */
			hash_fields |= IBV_RX_HASH_INNER;
			tunnel = 1;
		}
		rss_desc.tunnel = tunnel;
		rss_desc.hash_fields = hash_fields;
		hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
		if (!hrxq_idx) {
			rte_flow_error_set
				(error, rte_errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "cannot get hash queue");
			goto error_hrxq_new;
		}
		err = __flow_dv_action_rss_hrxq_set
			(shared_rss, hash_fields, hrxq_idx);
		MLX5_ASSERT(!err);
	}
	return 0;
error_hrxq_new:
	/* Roll back: drop created hrxqs, then the indirection table. */
	err = rte_errno;
	__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	/* Clear the pointer only if the table was actually freed. */
	if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
		shared_rss->ind_tbl = NULL;
	rte_errno = err;
	return -rte_errno;
}
14043
14044 /**
14045  * Create shared RSS action.
14046  *
14047  * @param[in] dev
14048  *   Pointer to the Ethernet device structure.
14049  * @param[in] conf
14050  *   Shared action configuration.
14051  * @param[in] rss
14052  *   RSS action specification used to create shared action.
14053  * @param[out] error
14054  *   Perform verbose error reporting if not NULL. Initialized in case of
14055  *   error only.
14056  *
14057  * @return
14058  *   A valid shared action ID in case of success, 0 otherwise and
14059  *   rte_errno is set.
14060  */
14061 static uint32_t
14062 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14063                             const struct rte_flow_indir_action_conf *conf,
14064                             const struct rte_flow_action_rss *rss,
14065                             struct rte_flow_error *error)
14066 {
14067         struct mlx5_priv *priv = dev->data->dev_private;
14068         struct mlx5_shared_action_rss *shared_rss = NULL;
14069         void *queue = NULL;
14070         struct rte_flow_action_rss *origin;
14071         const uint8_t *rss_key;
14072         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14073         uint32_t idx;
14074
14075         RTE_SET_USED(conf);
14076         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14077                             0, SOCKET_ID_ANY);
14078         shared_rss = mlx5_ipool_zmalloc
14079                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14080         if (!shared_rss || !queue) {
14081                 rte_flow_error_set(error, ENOMEM,
14082                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14083                                    "cannot allocate resource memory");
14084                 goto error_rss_init;
14085         }
14086         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14087                 rte_flow_error_set(error, E2BIG,
14088                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14089                                    "rss action number out of range");
14090                 goto error_rss_init;
14091         }
14092         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14093                                           sizeof(*shared_rss->ind_tbl),
14094                                           0, SOCKET_ID_ANY);
14095         if (!shared_rss->ind_tbl) {
14096                 rte_flow_error_set(error, ENOMEM,
14097                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14098                                    "cannot allocate resource memory");
14099                 goto error_rss_init;
14100         }
14101         memcpy(queue, rss->queue, queue_size);
14102         shared_rss->ind_tbl->queues = queue;
14103         shared_rss->ind_tbl->queues_n = rss->queue_num;
14104         origin = &shared_rss->origin;
14105         origin->func = rss->func;
14106         origin->level = rss->level;
14107         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14108         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14109         /* NULL RSS key indicates default RSS key. */
14110         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14111         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14112         origin->key = &shared_rss->key[0];
14113         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14114         origin->queue = queue;
14115         origin->queue_num = rss->queue_num;
14116         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14117                 goto error_rss_init;
14118         rte_spinlock_init(&shared_rss->action_rss_sl);
14119         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14120         rte_spinlock_lock(&priv->shared_act_sl);
14121         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14122                      &priv->rss_shared_actions, idx, shared_rss, next);
14123         rte_spinlock_unlock(&priv->shared_act_sl);
14124         return idx;
14125 error_rss_init:
14126         if (shared_rss) {
14127                 if (shared_rss->ind_tbl)
14128                         mlx5_free(shared_rss->ind_tbl);
14129                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14130                                 idx);
14131         }
14132         if (queue)
14133                 mlx5_free(queue);
14134         return 0;
14135 }
14136
14137 /**
14138  * Destroy the shared RSS action.
14139  * Release related hash RX queue objects.
14140  *
14141  * @param[in] dev
14142  *   Pointer to the Ethernet device structure.
14143  * @param[in] idx
14144  *   The shared RSS action object ID to be removed.
14145  * @param[out] error
14146  *   Perform verbose error reporting if not NULL. Initialized in case of
14147  *   error only.
14148  *
14149  * @return
14150  *   0 on success, otherwise negative errno value.
14151  */
static int
__flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	uint32_t old_refcnt = 1;
	int remaining;
	uint16_t *queue = NULL;

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action");
	/* Release pre-created hrxqs; leftover references block destroy. */
	remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	if (remaining)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss hrxq has references");
	/*
	 * Atomically drop the creation reference (1 -> 0); fails if any
	 * flow still holds a reference on the action.
	 */
	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
					 0, 0, __ATOMIC_ACQUIRE,
					 __ATOMIC_RELAXED))
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss has references");
	/* Save the queue array pointer; it is freed separately below. */
	queue = shared_rss->ind_tbl->queues;
	remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
	if (remaining)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "shared rss indirection table has"
					  " references");
	mlx5_free(queue);
	/* Unlink from the per-port shared action list under its lock. */
	rte_spinlock_lock(&priv->shared_act_sl);
	ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_rss, next);
	rte_spinlock_unlock(&priv->shared_act_sl);
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			idx);
	return 0;
}
14197
14198 /**
14199  * Create indirect action, lock free,
14200  * (mutex should be acquired by caller).
14201  * Dispatcher for action type specific call.
14202  *
14203  * @param[in] dev
14204  *   Pointer to the Ethernet device structure.
14205  * @param[in] conf
14206  *   Shared action configuration.
14207  * @param[in] action
14208  *   Action specification used to create indirect action.
14209  * @param[out] error
14210  *   Perform verbose error reporting if not NULL. Initialized in case of
14211  *   error only.
14212  *
14213  * @return
14214  *   A valid shared action handle in case of success, NULL otherwise and
14215  *   rte_errno is set.
14216  */
14217 static struct rte_flow_action_handle *
14218 flow_dv_action_create(struct rte_eth_dev *dev,
14219                       const struct rte_flow_indir_action_conf *conf,
14220                       const struct rte_flow_action *action,
14221                       struct rte_flow_error *err)
14222 {
14223         struct mlx5_priv *priv = dev->data->dev_private;
14224         uint32_t age_idx = 0;
14225         uint32_t idx = 0;
14226         uint32_t ret = 0;
14227
14228         switch (action->type) {
14229         case RTE_FLOW_ACTION_TYPE_RSS:
14230                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14231                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14232                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14233                 break;
14234         case RTE_FLOW_ACTION_TYPE_AGE:
14235                 age_idx = flow_dv_aso_age_alloc(dev, err);
14236                 if (!age_idx) {
14237                         ret = -rte_errno;
14238                         break;
14239                 }
14240                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14241                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14242                 flow_dv_aso_age_params_init(dev, age_idx,
14243                                         ((const struct rte_flow_action_age *)
14244                                                 action->conf)->context ?
14245                                         ((const struct rte_flow_action_age *)
14246                                                 action->conf)->context :
14247                                         (void *)(uintptr_t)idx,
14248                                         ((const struct rte_flow_action_age *)
14249                                                 action->conf)->timeout);
14250                 ret = age_idx;
14251                 break;
14252         case RTE_FLOW_ACTION_TYPE_COUNT:
14253                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14254                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14255                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14256                 break;
14257         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14258                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14259                                                          err);
14260                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14261                 break;
14262         default:
14263                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14264                                    NULL, "action type not supported");
14265                 break;
14266         }
14267         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14268 }
14269
14270 /**
14271  * Destroy the indirect action.
14272  * Release action related resources on the NIC and the memory.
14273  * Lock free, (mutex should be acquired by caller).
14274  * Dispatcher for action type specific call.
14275  *
14276  * @param[in] dev
14277  *   Pointer to the Ethernet device structure.
14278  * @param[in] handle
14279  *   The indirect action object handle to be removed.
14280  * @param[out] error
14281  *   Perform verbose error reporting if not NULL. Initialized in case of
14282  *   error only.
14283  *
14284  * @return
14285  *   0 on success, otherwise negative errno value.
14286  */
static int
flow_dv_action_destroy(struct rte_eth_dev *dev,
		       struct rte_flow_action_handle *handle,
		       struct rte_flow_error *error)
{
	/* The handle encodes the type in its upper bits, the index below. */
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	struct mlx5_flow_counter *cnt;
	uint32_t no_flow_refcnt = 1;
	int ret;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_RSS:
		return __flow_dv_action_rss_release(dev, idx, error);
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
		/*
		 * Free only when the sole remaining reference is the
		 * creation one (refcnt == 1); otherwise flows still use
		 * the counter and destroy must be refused.
		 */
		if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
						 &no_flow_refcnt, 1, false,
						 __ATOMIC_ACQUIRE,
						 __ATOMIC_RELAXED))
			return rte_flow_error_set(error, EBUSY,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Indirect count action has references");
		flow_dv_counter_free(dev, idx);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		ret = flow_dv_aso_age_release(dev, idx);
		if (ret)
			/*
			 * In this case, the last flow has a reference will
			 * actually release the age action.
			 */
			DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
				" released with references %d.", idx, ret);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		/* A positive return means flows still reference the object. */
		ret = flow_dv_aso_ct_release(dev, idx);
		if (ret < 0)
			return ret;
		if (ret > 0)
			DRV_LOG(DEBUG, "Connection tracking object %u still "
				"has references %d.", idx, ret);
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "action type not supported");
	}
}
14339
14340 /**
14341  * Updates in place shared RSS action configuration.
14342  *
14343  * @param[in] dev
14344  *   Pointer to the Ethernet device structure.
14345  * @param[in] idx
14346  *   The shared RSS action object ID to be updated.
14347  * @param[in] action_conf
14348  *   RSS action specification used to modify *shared_rss*.
14349  * @param[out] error
14350  *   Perform verbose error reporting if not NULL. Initialized in case of
14351  *   error only.
14352  *
14353  * @return
14354  *   0 on success, otherwise negative errno value.
14355  * @note: currently only support update of RSS queues.
14356  */
static int
__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
			    const struct rte_flow_action_rss *action_conf,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	int ret = 0;
	void *queue = NULL;
	uint16_t *queue_old = NULL;
	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action to update");
	/* In-place queue update requires the object-modify capability. */
	if (priv->obj_ops.ind_table_modify == NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot modify indirection table");
	queue = mlx5_malloc(MLX5_MEM_ZERO,
			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	if (!queue)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	memcpy(queue, action_conf->queue, queue_size);
	MLX5_ASSERT(shared_rss->ind_tbl);
	/* Serialize with concurrent updates on the same action. */
	rte_spinlock_lock(&shared_rss->action_rss_sl);
	queue_old = shared_rss->ind_tbl->queues;
	ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
					queue, action_conf->queue_num, true);
	if (ret) {
		/* Modify failed: keep the old queues, drop the new copy. */
		mlx5_free(queue);
		ret = rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "cannot update indirection table");
	} else {
		/* Success: the table now owns *queue*; free the old array. */
		mlx5_free(queue_old);
		shared_rss->origin.queue = queue;
		shared_rss->origin.queue_num = action_conf->queue_num;
	}
	rte_spinlock_unlock(&shared_rss->action_rss_sl);
	return ret;
}
14405
14406 /*
14407  * Updates in place conntrack context or direction.
14408  * Context update should be synchronized.
14409  *
14410  * @param[in] dev
14411  *   Pointer to the Ethernet device structure.
14412  * @param[in] idx
14413  *   The conntrack object ID to be updated.
14414  * @param[in] update
14415  *   Pointer to the structure of information to update.
14416  * @param[out] error
14417  *   Perform verbose error reporting if not NULL. Initialized in case of
14418  *   error only.
14419  *
14420  * @return
14421  *   0 on success, otherwise negative errno value.
14422  */
14423 static int
14424 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14425                            const struct rte_flow_modify_conntrack *update,
14426                            struct rte_flow_error *error)
14427 {
14428         struct mlx5_priv *priv = dev->data->dev_private;
14429         struct mlx5_aso_ct_action *ct;
14430         const struct rte_flow_action_conntrack *new_prf;
14431         int ret = 0;
14432         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14433         uint32_t dev_idx;
14434
14435         if (PORT_ID(priv) != owner)
14436                 return rte_flow_error_set(error, EACCES,
14437                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14438                                           NULL,
14439                                           "CT object owned by another port");
14440         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14441         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14442         if (!ct->refcnt)
14443                 return rte_flow_error_set(error, ENOMEM,
14444                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14445                                           NULL,
14446                                           "CT object is inactive");
14447         new_prf = &update->new_ct;
14448         if (update->direction)
14449                 ct->is_original = !!new_prf->is_original_dir;
14450         if (update->state) {
14451                 /* Only validate the profile when it needs to be updated. */
14452                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14453                 if (ret)
14454                         return ret;
14455                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14456                 if (ret)
14457                         return rte_flow_error_set(error, EIO,
14458                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14459                                         NULL,
14460                                         "Failed to send CT context update WQE");
14461                 /* Block until ready or a failure. */
14462                 ret = mlx5_aso_ct_available(priv->sh, ct);
14463                 if (ret)
14464                         rte_flow_error_set(error, rte_errno,
14465                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14466                                            NULL,
14467                                            "Timeout to get the CT update");
14468         }
14469         return ret;
14470 }
14471
14472 /**
14473  * Updates in place shared action configuration, lock free,
14474  * (mutex should be acquired by caller).
14475  *
14476  * @param[in] dev
14477  *   Pointer to the Ethernet device structure.
14478  * @param[in] handle
14479  *   The indirect action object handle to be updated.
14480  * @param[in] update
14481  *   Action specification used to modify the action pointed by *handle*.
14482  *   *update* could be of same type with the action pointed by the *handle*
14483  *   handle argument, or some other structures like a wrapper, depending on
14484  *   the indirect action type.
14485  * @param[out] error
14486  *   Perform verbose error reporting if not NULL. Initialized in case of
14487  *   error only.
14488  *
14489  * @return
14490  *   0 on success, otherwise negative errno value.
14491  */
14492 static int
14493 flow_dv_action_update(struct rte_eth_dev *dev,
14494                         struct rte_flow_action_handle *handle,
14495                         const void *update,
14496                         struct rte_flow_error *err)
14497 {
14498         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14499         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14500         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14501         const void *action_conf;
14502
14503         switch (type) {
14504         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14505                 action_conf = ((const struct rte_flow_action *)update)->conf;
14506                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14507         case MLX5_INDIRECT_ACTION_TYPE_CT:
14508                 return __flow_dv_action_ct_update(dev, idx, update, err);
14509         default:
14510                 return rte_flow_error_set(err, ENOTSUP,
14511                                           RTE_FLOW_ERROR_TYPE_ACTION,
14512                                           NULL,
14513                                           "action type update not supported");
14514         }
14515 }
14516
14517 /**
14518  * Destroy the meter sub policy table rules.
14519  * Lock free, (mutex should be acquired by caller).
14520  *
14521  * @param[in] dev
14522  *   Pointer to Ethernet device.
14523  * @param[in] sub_policy
14524  *   Pointer to meter sub policy table.
14525  */
14526 static void
14527 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
14528                              struct mlx5_flow_meter_sub_policy *sub_policy)
14529 {
14530         struct mlx5_flow_tbl_data_entry *tbl;
14531         int i;
14532
14533         for (i = 0; i < RTE_COLORS; i++) {
14534                 if (sub_policy->color_rule[i]) {
14535                         claim_zero(mlx5_flow_os_destroy_flow
14536                                 (sub_policy->color_rule[i]));
14537                         sub_policy->color_rule[i] = NULL;
14538                 }
14539                 if (sub_policy->color_matcher[i]) {
14540                         tbl = container_of(sub_policy->color_matcher[i]->tbl,
14541                                 typeof(*tbl), tbl);
14542                         mlx5_cache_unregister(&tbl->matchers,
14543                                       &sub_policy->color_matcher[i]->entry);
14544                         sub_policy->color_matcher[i] = NULL;
14545                 }
14546         }
14547         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14548                 if (sub_policy->rix_hrxq[i]) {
14549                         mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
14550                         sub_policy->rix_hrxq[i] = 0;
14551                 }
14552                 if (sub_policy->jump_tbl[i]) {
14553                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14554                         sub_policy->jump_tbl[i]);
14555                         sub_policy->jump_tbl[i] = NULL;
14556                 }
14557         }
14558         if (sub_policy->tbl_rsc) {
14559                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14560                         sub_policy->tbl_rsc);
14561                 sub_policy->tbl_rsc = NULL;
14562         }
14563 }
14564
14565 /**
14566  * Destroy policy rules, lock free,
14567  * (mutex should be acquired by caller).
14568  * Dispatcher for action type specific call.
14569  *
14570  * @param[in] dev
14571  *   Pointer to the Ethernet device structure.
14572  * @param[in] mtr_policy
14573  *   Meter policy struct.
14574  */
14575 static void
14576 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
14577                       struct mlx5_flow_meter_policy *mtr_policy)
14578 {
14579         uint32_t i, j;
14580         struct mlx5_flow_meter_sub_policy *sub_policy;
14581         uint16_t sub_policy_num;
14582
14583         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14584                 sub_policy_num = (mtr_policy->sub_policy_num >>
14585                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14586                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14587                 for (j = 0; j < sub_policy_num; j++) {
14588                         sub_policy = mtr_policy->sub_policys[i][j];
14589                         if (sub_policy)
14590                                 __flow_dv_destroy_sub_policy_rules
14591                                                 (dev, sub_policy);
14592                 }
14593         }
14594 }
14595
14596 /**
14597  * Destroy policy action, lock free,
14598  * (mutex should be acquired by caller).
14599  * Dispatcher for action type specific call.
14600  *
14601  * @param[in] dev
14602  *   Pointer to the Ethernet device structure.
14603  * @param[in] mtr_policy
14604  *   Meter policy struct.
14605  */
14606 static void
14607 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
14608                       struct mlx5_flow_meter_policy *mtr_policy)
14609 {
14610         struct rte_flow_action *rss_action;
14611         struct mlx5_flow_handle dev_handle;
14612         uint32_t i, j;
14613
14614         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14615                 if (mtr_policy->act_cnt[i].rix_mark) {
14616                         flow_dv_tag_release(dev,
14617                                 mtr_policy->act_cnt[i].rix_mark);
14618                         mtr_policy->act_cnt[i].rix_mark = 0;
14619                 }
14620                 if (mtr_policy->act_cnt[i].modify_hdr) {
14621                         dev_handle.dvh.modify_hdr =
14622                                 mtr_policy->act_cnt[i].modify_hdr;
14623                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
14624                 }
14625                 switch (mtr_policy->act_cnt[i].fate_action) {
14626                 case MLX5_FLOW_FATE_SHARED_RSS:
14627                         rss_action = mtr_policy->act_cnt[i].rss;
14628                         mlx5_free(rss_action);
14629                         break;
14630                 case MLX5_FLOW_FATE_PORT_ID:
14631                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
14632                                 flow_dv_port_id_action_resource_release(dev,
14633                                 mtr_policy->act_cnt[i].rix_port_id_action);
14634                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
14635                         }
14636                         break;
14637                 case MLX5_FLOW_FATE_DROP:
14638                 case MLX5_FLOW_FATE_JUMP:
14639                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14640                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
14641                                                 NULL;
14642                         break;
14643                 default:
14644                         /*Queue action do nothing*/
14645                         break;
14646                 }
14647         }
14648         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14649                 mtr_policy->dr_drop_action[j] = NULL;
14650 }
14651
/**
 * Create policy action per domain, lock free,
 * (mutex should be acquired by caller).
 * Dispatcher for action type specific call.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 * @param[in] actions
 *   Per-color action specifications used to create meter actions.
 * @param[in] domain
 *   Meter domain (ingress, egress or transfer) to create actions for.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
__flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			const struct rte_flow_action *actions[RTE_COLORS],
			enum mlx5_meter_domain domain,
			struct rte_mtr_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_error flow_err;
	const struct rte_flow_action *act;
	/* NOTE(review): accumulated but not otherwise consumed here. */
	uint64_t action_flags = 0;
	struct mlx5_flow_handle dh;
	struct mlx5_flow dev_flow;
	struct mlx5_flow_dv_port_id_action_resource port_id_action;
	int i, ret;
	uint8_t egress, transfer;
	struct mlx5_meter_policy_action_container *act_cnt = NULL;
	/* Scratch modify header with room for the maximum command count. */
	union {
		struct mlx5_flow_dv_modify_hdr_resource res;
		uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
			    sizeof(struct mlx5_modification_cmd) *
			    (MLX5_MAX_MODIFY_NUM + 1)];
	} mhdr_dummy;

	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	/* Dummy flow/handle so the resource register helpers can be reused. */
	memset(&dh, 0, sizeof(struct mlx5_flow_handle));
	memset(&dev_flow, 0, sizeof(struct mlx5_flow));
	memset(&port_id_action, 0,
		sizeof(struct mlx5_flow_dv_port_id_action_resource));
	dev_flow.handle = &dh;
	dev_flow.dv.port_id_action = &port_id_action;
	dev_flow.external = true;
	/* Walk the action list of every policy color. */
	for (i = 0; i < RTE_COLORS; i++) {
		/* Only green/yellow colors have an action container. */
		if (i < MLX5_MTR_RTE_COLORS)
			act_cnt = &mtr_policy->act_cnt[i];
		for (act = actions[i];
			act && act->type != RTE_FLOW_ACTION_TYPE_END;
			act++) {
			switch (act->type) {
			/* MARK: register a tag resource holding the id. */
			case RTE_FLOW_ACTION_TYPE_MARK:
			{
				uint32_t tag_be = mlx5_flow_mark_set
					(((const struct rte_flow_action_mark *)
					(act->conf))->id);

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "mark action for this color");
				dev_flow.handle->mark = 1;
				if (flow_dv_tag_resource_register(dev, tag_be,
						  &dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL,
					"cannot setup policy mark action");
				MLX5_ASSERT(dev_flow.dv.tag_resource);
				act_cnt->rix_mark =
					dev_flow.handle->dvh.rix_tag;
				action_flags |= MLX5_FLOW_ACTION_MARK;
				break;
			}
			/* SET_TAG: build and register a modify header. */
			case RTE_FLOW_ACTION_TYPE_SET_TAG:
			{
				struct mlx5_flow_dv_modify_hdr_resource
					*mhdr_res = &mhdr_dummy.res;

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "set tag action for this color");
				memset(mhdr_res, 0, sizeof(*mhdr_res));
				/* Table type follows the meter domain. */
				mhdr_res->ft_type = transfer ?
					MLX5DV_FLOW_TABLE_TYPE_FDB :
					egress ?
					MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
					MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
				if (flow_dv_convert_action_set_tag
				(dev, mhdr_res,
				(const struct rte_flow_action_set_tag *)
				act->conf,  &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot convert policy "
					"set tag action");
				if (!mhdr_res->actions_num)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot find policy "
					"set tag action");
				/* create modify action if needed. */
				dev_flow.dv.group = 1;
				if (flow_dv_modify_hdr_resource_register
					(dev, mhdr_res, &dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot register policy "
					"set tag action");
				act_cnt->modify_hdr =
				dev_flow.handle->dvh.modify_hdr;
				action_flags |= MLX5_FLOW_ACTION_SET_TAG;
				break;
			}
			/* DROP: jump to the shared per-domain drop table. */
			case RTE_FLOW_ACTION_TYPE_DROP:
			{
				struct mlx5_flow_mtr_mng *mtrmng =
						priv->sh->mtrmng;
				struct mlx5_flow_tbl_data_entry *tbl_data;

				/*
				 * Create the drop table with
				 * METER DROP level.
				 */
				if (!mtrmng->drop_tbl[domain]) {
					mtrmng->drop_tbl[domain] =
					flow_dv_tbl_resource_get(dev,
					MLX5_FLOW_TABLE_LEVEL_METER,
					egress, transfer, false, NULL, 0,
					0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
					if (!mtrmng->drop_tbl[domain])
						return -rte_mtr_error_set
					(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL,
					"Failed to create meter drop table");
				}
				tbl_data = container_of
				(mtrmng->drop_tbl[domain],
				struct mlx5_flow_tbl_data_entry, tbl);
				if (i < MLX5_MTR_RTE_COLORS) {
					act_cnt->dr_jump_action[domain] =
						tbl_data->jump.action;
					act_cnt->fate_action =
						MLX5_FLOW_FATE_DROP;
				}
				/* RED drop is also cached on the policy. */
				if (i == RTE_COLOR_RED)
					mtr_policy->dr_drop_action[domain] =
						tbl_data->jump.action;
				action_flags |= MLX5_FLOW_ACTION_DROP;
				break;
			}
			/* QUEUE: store the queue index for translate stage. */
			case RTE_FLOW_ACTION_TYPE_QUEUE:
			{
				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot create policy "
					"fate queue for this color");
				act_cnt->queue =
				((const struct rte_flow_action_queue *)
					(act->conf))->index;
				act_cnt->fate_action =
					MLX5_FLOW_FATE_QUEUE;
				dev_flow.handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
				mtr_policy->is_queue = 1;
				action_flags |= MLX5_FLOW_ACTION_QUEUE;
				break;
			}
			/* RSS: copy the full action into the policy struct. */
			case RTE_FLOW_ACTION_TYPE_RSS:
			{
				int rss_size;

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "rss action for this color");
				/*
				 * Save RSS conf into policy struct
				 * for translate stage.
				 */
				rss_size = (int)rte_flow_conv
					(RTE_FLOW_CONV_OP_ACTION,
					NULL, 0, act, &flow_err);
				if (rss_size <= 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Get the wrong "
					  "rss action struct size");
				act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
						rss_size, 0, SOCKET_ID_ANY);
				if (!act_cnt->rss)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "Fail to malloc rss action memory");
				ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
					act_cnt->rss, rss_size,
					act, &flow_err);
				if (ret < 0)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "Fail to save "
					  "rss action into policy struct");
				act_cnt->fate_action =
					MLX5_FLOW_FATE_SHARED_RSS;
				action_flags |= MLX5_FLOW_ACTION_RSS;
				break;
			}
			/* PORT_ID: register a port id action resource. */
			case RTE_FLOW_ACTION_TYPE_PORT_ID:
			{
				struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
				uint32_t port_id = 0;

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot create policy "
					"port action for this color");
				memset(&port_id_resource, 0,
					sizeof(port_id_resource));
				if (flow_dv_translate_action_port_id(dev, act,
						&port_id, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot translate "
					"policy port action");
				port_id_resource.port_id = port_id;
				if (flow_dv_port_id_action_resource_register
					(dev, &port_id_resource,
					&dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot setup "
					"policy port action");
				act_cnt->rix_port_id_action =
					dev_flow.handle->rix_port_id_action;
				act_cnt->fate_action =
					MLX5_FLOW_FATE_PORT_ID;
				action_flags |= MLX5_FLOW_ACTION_PORT_ID;
				break;
			}
			/* JUMP: resolve group to table, get jump action. */
			case RTE_FLOW_ACTION_TYPE_JUMP:
			{
				uint32_t jump_group = 0;
				uint32_t table = 0;
				struct mlx5_flow_tbl_data_entry *tbl_data;
				struct flow_grp_info grp_info = {
					.external = !!dev_flow.external,
					.transfer = !!transfer,
					.fdb_def_rule = !!priv->fdb_def_rule,
					.std_tbl_fix = 0,
					.skip_scale = dev_flow.skip_scale &
					(1 << MLX5_SCALE_FLOW_GROUP_BIT),
				};
				/* Jump table is kept on sub policy 0. */
				struct mlx5_flow_meter_sub_policy *sub_policy =
				mtr_policy->sub_policys[domain][0];

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
					  ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL,
					  "cannot create policy "
					  "jump action for this color");
				jump_group =
				((const struct rte_flow_action_jump *)
							act->conf)->group;
				if (mlx5_flow_group_to_table(dev, NULL,
						       jump_group,
						       &table,
						       &grp_info, &flow_err))
					return -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot setup "
					"policy jump action");
				sub_policy->jump_tbl[i] =
				flow_dv_tbl_resource_get(dev,
					table, egress,
					transfer,
					!!dev_flow.external,
					NULL, jump_group, 0,
					0, &flow_err);
				if
				(!sub_policy->jump_tbl[i])
					return  -rte_mtr_error_set(error,
					ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "cannot create jump action.");
				tbl_data = container_of
				(sub_policy->jump_tbl[i],
				struct mlx5_flow_tbl_data_entry, tbl);
				act_cnt->dr_jump_action[domain] =
					tbl_data->jump.action;
				act_cnt->fate_action =
					MLX5_FLOW_FATE_JUMP;
				action_flags |= MLX5_FLOW_ACTION_JUMP;
				break;
			}
			default:
				return -rte_mtr_error_set(error, ENOTSUP,
					  RTE_MTR_ERROR_TYPE_METER_POLICY,
					  NULL, "action type not supported");
			}
		}
	}
	return 0;
}
14991
14992 /**
14993  * Create policy action per domain, lock free,
14994  * (mutex should be acquired by caller).
14995  * Dispatcher for action type specific call.
14996  *
14997  * @param[in] dev
14998  *   Pointer to the Ethernet device structure.
14999  * @param[in] mtr_policy
15000  *   Meter policy struct.
15001  * @param[in] action
15002  *   Action specification used to create meter actions.
15003  * @param[out] error
15004  *   Perform verbose error reporting if not NULL. Initialized in case of
15005  *   error only.
15006  *
15007  * @return
15008  *   0 on success, otherwise negative errno value.
15009  */
15010 static int
15011 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15012                       struct mlx5_flow_meter_policy *mtr_policy,
15013                       const struct rte_flow_action *actions[RTE_COLORS],
15014                       struct rte_mtr_error *error)
15015 {
15016         int ret, i;
15017         uint16_t sub_policy_num;
15018
15019         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15020                 sub_policy_num = (mtr_policy->sub_policy_num >>
15021                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15022                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15023                 if (sub_policy_num) {
15024                         ret = __flow_dv_create_domain_policy_acts(dev,
15025                                 mtr_policy, actions,
15026                                 (enum mlx5_meter_domain)i, error);
15027                         if (ret)
15028                                 return ret;
15029                 }
15030         }
15031         return 0;
15032 }
15033
15034 /**
15035  * Query a DV flow rule for its statistics via DevX.
15036  *
15037  * @param[in] dev
15038  *   Pointer to Ethernet device.
15039  * @param[in] cnt_idx
15040  *   Index to the flow counter.
15041  * @param[out] data
15042  *   Data retrieved by the query.
15043  * @param[out] error
15044  *   Perform verbose error reporting if not NULL.
15045  *
15046  * @return
15047  *   0 on success, a negative errno value otherwise and rte_errno is set.
15048  */
15049 static int
15050 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15051                     struct rte_flow_error *error)
15052 {
15053         struct mlx5_priv *priv = dev->data->dev_private;
15054         struct rte_flow_query_count *qc = data;
15055
15056         if (!priv->config.devx)
15057                 return rte_flow_error_set(error, ENOTSUP,
15058                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15059                                           NULL,
15060                                           "counters are not supported");
15061         if (cnt_idx) {
15062                 uint64_t pkts, bytes;
15063                 struct mlx5_flow_counter *cnt;
15064                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15065
15066                 if (err)
15067                         return rte_flow_error_set(error, -err,
15068                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15069                                         NULL, "cannot read counters");
15070                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15071                 qc->hits_set = 1;
15072                 qc->bytes_set = 1;
15073                 qc->hits = pkts - cnt->hits;
15074                 qc->bytes = bytes - cnt->bytes;
15075                 if (qc->reset) {
15076                         cnt->hits = pkts;
15077                         cnt->bytes = bytes;
15078                 }
15079                 return 0;
15080         }
15081         return rte_flow_error_set(error, EINVAL,
15082                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15083                                   NULL,
15084                                   "counters are not available");
15085 }
15086
/**
 * Query an indirect (shared) action.
 *
 * The action type is encoded in the upper bits of the handle value and the
 * per-type object index in the lower bits (MLX5_INDIRECT_ACTION_TYPE_OFFSET
 * split). Dispatches the query to the matching backend.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] handle
 *   Indirect action handle (an encoded index, not a real pointer).
 * @param[out] data
 *   Caller buffer; its concrete type depends on the action type
 *   (struct rte_flow_query_age, counter query, or
 *   struct rte_flow_action_conntrack).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and error is set.
 */
static int
flow_dv_action_query(struct rte_eth_dev *dev,
		     const struct rte_flow_action_handle *handle, void *data,
		     struct rte_flow_error *error)
{
	struct mlx5_age_param *age_param;
	struct rte_flow_query_age *resp;
	/* The handle is an encoded 32-bit index, not a dereferenceable pointer. */
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	uint16_t owner;
	uint32_t dev_idx;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
		resp = data;
		resp->aged = __atomic_load_n(&age_param->state,
					      __ATOMIC_RELAXED) == AGE_TMOUT ?
									  1 : 0;
		/* Seconds-since-hit is only meaningful while not aged out. */
		resp->sec_since_last_hit_valid = !resp->aged;
		if (resp->sec_since_last_hit_valid)
			resp->sec_since_last_hit = __atomic_load_n
			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		return flow_dv_query_count(dev, idx, data, error);
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		/* CT objects are per-port; reject cross-port queries. */
		owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
		if (owner != PORT_ID(priv))
			return rte_flow_error_set(error, EACCES,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object owned by another port");
		dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
		ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
		MLX5_ASSERT(ct);
		if (!ct->refcnt)
			return rte_flow_error_set(error, EFAULT,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object is inactive");
		/* Fill the software-tracked fields before the HW WQE query. */
		((struct rte_flow_action_conntrack *)data)->peer_port =
							ct->peer;
		((struct rte_flow_action_conntrack *)data)->is_original_dir =
							ct->is_original;
		if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to query CT context");
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "action type query not supported");
	}
}
15147
15148 /**
15149  * Query a flow rule AGE action for aging information.
15150  *
15151  * @param[in] dev
15152  *   Pointer to Ethernet device.
15153  * @param[in] flow
15154  *   Pointer to the sub flow.
15155  * @param[out] data
15156  *   data retrieved by the query.
15157  * @param[out] error
15158  *   Perform verbose error reporting if not NULL.
15159  *
15160  * @return
15161  *   0 on success, a negative errno value otherwise and rte_errno is set.
15162  */
15163 static int
15164 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15165                   void *data, struct rte_flow_error *error)
15166 {
15167         struct rte_flow_query_age *resp = data;
15168         struct mlx5_age_param *age_param;
15169
15170         if (flow->age) {
15171                 struct mlx5_aso_age_action *act =
15172                                      flow_aso_age_get_by_idx(dev, flow->age);
15173
15174                 age_param = &act->age_params;
15175         } else if (flow->counter) {
15176                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15177
15178                 if (!age_param || !age_param->timeout)
15179                         return rte_flow_error_set
15180                                         (error, EINVAL,
15181                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15182                                          NULL, "cannot read age data");
15183         } else {
15184                 return rte_flow_error_set(error, EINVAL,
15185                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15186                                           NULL, "age data not available");
15187         }
15188         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15189                                      AGE_TMOUT ? 1 : 0;
15190         resp->sec_since_last_hit_valid = !resp->aged;
15191         if (resp->sec_since_last_hit_valid)
15192                 resp->sec_since_last_hit = __atomic_load_n
15193                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15194         return 0;
15195 }
15196
15197 /**
15198  * Query a flow.
15199  *
15200  * @see rte_flow_query()
15201  * @see rte_flow_ops
15202  */
15203 static int
15204 flow_dv_query(struct rte_eth_dev *dev,
15205               struct rte_flow *flow __rte_unused,
15206               const struct rte_flow_action *actions __rte_unused,
15207               void *data __rte_unused,
15208               struct rte_flow_error *error __rte_unused)
15209 {
15210         int ret = -EINVAL;
15211
15212         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15213                 switch (actions->type) {
15214                 case RTE_FLOW_ACTION_TYPE_VOID:
15215                         break;
15216                 case RTE_FLOW_ACTION_TYPE_COUNT:
15217                         ret = flow_dv_query_count(dev, flow->counter, data,
15218                                                   error);
15219                         break;
15220                 case RTE_FLOW_ACTION_TYPE_AGE:
15221                         ret = flow_dv_query_age(dev, flow, data, error);
15222                         break;
15223                 default:
15224                         return rte_flow_error_set(error, ENOTSUP,
15225                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15226                                                   actions,
15227                                                   "action not supported");
15228                 }
15229         }
15230         return ret;
15231 }
15232
15233 /**
15234  * Destroy the meter table set.
15235  * Lock free, (mutex should be acquired by caller).
15236  *
15237  * @param[in] dev
15238  *   Pointer to Ethernet device.
15239  * @param[in] fm
15240  *   Meter information table.
15241  */
15242 static void
15243 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15244                         struct mlx5_flow_meter_info *fm)
15245 {
15246         struct mlx5_priv *priv = dev->data->dev_private;
15247         int i;
15248
15249         if (!fm || !priv->config.dv_flow_en)
15250                 return;
15251         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15252                 if (fm->drop_rule[i]) {
15253                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15254                         fm->drop_rule[i] = NULL;
15255                 }
15256         }
15257 }
15258
15259 static void
15260 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15261 {
15262         struct mlx5_priv *priv = dev->data->dev_private;
15263         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15264         struct mlx5_flow_tbl_data_entry *tbl;
15265         int i, j;
15266
15267         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15268                 if (mtrmng->def_rule[i]) {
15269                         claim_zero(mlx5_flow_os_destroy_flow
15270                                         (mtrmng->def_rule[i]));
15271                         mtrmng->def_rule[i] = NULL;
15272                 }
15273                 if (mtrmng->def_matcher[i]) {
15274                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15275                                 struct mlx5_flow_tbl_data_entry, tbl);
15276                         mlx5_cache_unregister(&tbl->matchers,
15277                                       &mtrmng->def_matcher[i]->entry);
15278                         mtrmng->def_matcher[i] = NULL;
15279                 }
15280                 for (j = 0; j < MLX5_REG_BITS; j++) {
15281                         if (mtrmng->drop_matcher[i][j]) {
15282                                 tbl =
15283                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15284                                              struct mlx5_flow_tbl_data_entry,
15285                                              tbl);
15286                                 mlx5_cache_unregister(&tbl->matchers,
15287                                         &mtrmng->drop_matcher[i][j]->entry);
15288                                 mtrmng->drop_matcher[i][j] = NULL;
15289                         }
15290                 }
15291                 if (mtrmng->drop_tbl[i]) {
15292                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15293                                 mtrmng->drop_tbl[i]);
15294                         mtrmng->drop_tbl[i] = NULL;
15295                 }
15296         }
15297 }
15298
15299 /* Number of meter flow actions, count and jump or count and drop. */
15300 #define METER_ACTIONS 2
15301
15302 static void
15303 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15304                               enum mlx5_meter_domain domain)
15305 {
15306         struct mlx5_priv *priv = dev->data->dev_private;
15307         struct mlx5_flow_meter_def_policy *def_policy =
15308                         priv->sh->mtrmng->def_policy[domain];
15309
15310         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15311         mlx5_free(def_policy);
15312         priv->sh->mtrmng->def_policy[domain] = NULL;
15313 }
15314
15315 /**
15316  * Destroy the default policy table set.
15317  *
15318  * @param[in] dev
15319  *   Pointer to Ethernet device.
15320  */
15321 static void
15322 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15323 {
15324         struct mlx5_priv *priv = dev->data->dev_private;
15325         int i;
15326
15327         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15328                 if (priv->sh->mtrmng->def_policy[i])
15329                         __flow_dv_destroy_domain_def_policy(dev,
15330                                         (enum mlx5_meter_domain)i);
15331         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15332 }
15333
/**
 * Create a single meter-policy flow rule matching on one color.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] color_reg_c_idx
 *   Index of the metadata register carrying the meter color.
 * @param[in] color
 *   Color value the rule matches on.
 * @param[in] matcher_object
 *   Low-level matcher object the rule is attached to.
 * @param[in] actions_n
 *   Number of actions in @p actions.
 * @param[in] actions
 *   DV actions array for the rule.
 * @param[in] is_default_policy
 *   True for the default (global) policy; skips the port-id match.
 * @param[out] rule
 *   Created rule handle on success.
 * @param[in] attr
 *   Flow attributes used for the port-id item translation.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_flow(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			enum rte_color color, void *matcher_object,
			int actions_n, void *actions,
			bool is_default_policy, void **rule,
			const struct rte_flow_attr *attr)
{
	int ret;
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf) -
			MLX5_ST_SZ_BYTES(fte_match_set_misc4),
	};
	struct mlx5_flow_dv_match_params matcher = {
		.size = sizeof(matcher.buf) -
			MLX5_ST_SZ_BYTES(fte_match_set_misc4),
	};
	struct mlx5_priv *priv = dev->data->dev_private;

	/* On E-Switch ports, non-default policies must also match the port. */
	if (!is_default_policy && (priv->representor || priv->master)) {
		if (flow_dv_translate_item_port_id(dev, matcher.buf,
						   value.buf, NULL, attr)) {
			DRV_LOG(ERR,
			"Failed to create meter policy flow with port.");
			return -1;
		}
	}
	/* Match the exact color value in the color register. */
	flow_dv_match_meta_reg(matcher.buf, value.buf,
				(enum modify_reg)color_reg_c_idx,
				rte_col_2_mlx5_col(color),
				UINT32_MAX);
	ret = mlx5_flow_os_create_flow(matcher_object,
			(void *)&value, actions_n, actions, rule);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policy flow.");
		return -1;
	}
	return 0;
}
15373
/**
 * Register (or reuse from cache) the matcher for one policy color priority.
 *
 * The matcher mask covers the color register (and the port-id item on
 * E-Switch ports for non-default policies). The created matcher is stored
 * in sub_policy->color_matcher[priority].
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] color_reg_c_idx
 *   Index of the metadata register carrying the meter color.
 * @param[in] priority
 *   Matcher priority; also used as the color index into color_matcher[].
 * @param[in,out] sub_policy
 *   Sub policy holding the target table and the matcher slot.
 * @param[in] attr
 *   Flow attributes for the port-id item translation.
 * @param[in] is_default_policy
 *   True for the default (global) policy; skips the port-id match.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			uint16_t priority,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			const struct rte_flow_attr *attr,
			bool is_default_policy,
			struct rte_flow_error *error)
{
	struct mlx5_cache_entry *entry;
	struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf) -
				MLX5_ST_SZ_BYTES(fte_match_set_misc4),
		},
		.tbl = tbl_rsc,
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf) -
			MLX5_ST_SZ_BYTES(fte_match_set_misc4),
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = &matcher,
	};
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Mask covering all color bits of the register. */
	uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;

	if (!is_default_policy && (priv->representor || priv->master)) {
		if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
						   value.buf, NULL, attr)) {
			DRV_LOG(ERR,
			"Failed to register meter drop matcher with port.");
			return -1;
		}
	}
	tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
	/*
	 * Only priorities below RED mask on the color register; the RED
	 * priority matcher presumably acts as a catch-all - confirm against
	 * the rule creation path.
	 */
	if (priority < RTE_COLOR_RED)
		flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
			(enum modify_reg)color_reg_c_idx, 0, color_mask);
	matcher.priority = priority;
	/* CRC of the mask is the cache lookup key. */
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
					matcher.mask.size);
	entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to register meter drop matcher.");
		return -1;
	}
	sub_policy->color_matcher[priority] =
		container_of(entry, struct mlx5_flow_dv_matcher, entry);
	return 0;
}
15428
15429 /**
15430  * Create the policy rules per domain.
15431  *
15432  * @param[in] dev
15433  *   Pointer to Ethernet device.
15434  * @param[in] sub_policy
15435  *    Pointer to sub policy table..
15436  * @param[in] egress
15437  *   Direction of the table.
15438  * @param[in] transfer
15439  *   E-Switch or NIC flow.
15440  * @param[in] acts
15441  *   Pointer to policy action list per color.
15442  *
15443  * @return
15444  *   0 on success, -1 otherwise.
15445  */
15446 static int
15447 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
15448                 struct mlx5_flow_meter_sub_policy *sub_policy,
15449                 uint8_t egress, uint8_t transfer, bool is_default_policy,
15450                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
15451 {
15452         struct rte_flow_error flow_err;
15453         uint32_t color_reg_c_idx;
15454         struct rte_flow_attr attr = {
15455                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
15456                 .priority = 0,
15457                 .ingress = 0,
15458                 .egress = !!egress,
15459                 .transfer = !!transfer,
15460                 .reserved = 0,
15461         };
15462         int i;
15463         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
15464
15465         if (ret < 0)
15466                 return -1;
15467         /* Create policy table with POLICY level. */
15468         if (!sub_policy->tbl_rsc)
15469                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
15470                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
15471                                 egress, transfer, false, NULL, 0, 0,
15472                                 sub_policy->idx, &flow_err);
15473         if (!sub_policy->tbl_rsc) {
15474                 DRV_LOG(ERR,
15475                         "Failed to create meter sub policy table.");
15476                 return -1;
15477         }
15478         /* Prepare matchers. */
15479         color_reg_c_idx = ret;
15480         for (i = 0; i < RTE_COLORS; i++) {
15481                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
15482                         continue;
15483                 attr.priority = i;
15484                 if (!sub_policy->color_matcher[i]) {
15485                         /* Create matchers for Color. */
15486                         if (__flow_dv_create_policy_matcher(dev,
15487                                 color_reg_c_idx, i, sub_policy,
15488                                 &attr, is_default_policy, &flow_err))
15489                                 return -1;
15490                 }
15491                 /* Create flow, matching color. */
15492                 if (acts[i].actions_n)
15493                         if (__flow_dv_create_policy_flow(dev,
15494                                 color_reg_c_idx, (enum rte_color)i,
15495                                 sub_policy->color_matcher[i]->matcher_object,
15496                                 acts[i].actions_n,
15497                                 acts[i].dv_actions,
15498                                 is_default_policy,
15499                                 &sub_policy->color_rule[i],
15500                                 &attr))
15501                                 return -1;
15502         }
15503         return 0;
15504 }
15505
/**
 * Build the per-color DV action arrays for a policy and create its rules.
 *
 * Green actions are assembled from the policy action counters (mark tag,
 * modify header, then the fate action); red always gets the pre-created
 * drop action; yellow is not supported. Finally the per-domain policy
 * rules are created from the assembled arrays.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Meter policy holding the per-color action descriptors.
 * @param[in] sub_policy
 *   Sub policy providing the table and per-color hrxq indirection.
 * @param[in] domain
 *   Meter domain (ingress/egress/transfer) the rules belong to.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	struct mlx5_flow_dv_tag_resource *tag;
	struct mlx5_flow_dv_port_id_action_resource *port_action;
	struct mlx5_hrxq *hrxq;
	uint8_t egress, transfer;
	int i;

	for (i = 0; i < RTE_COLORS; i++) {
		acts[i].actions_n = 0;
		if (i == RTE_COLOR_YELLOW)
			continue;
		if (i == RTE_COLOR_RED) {
			/* Only support drop on red. */
			acts[i].dv_actions[0] =
			mtr_policy->dr_drop_action[domain];
			acts[i].actions_n = 1;
			continue;
		}
		/* Optional MARK action translated to a tag resource. */
		if (mtr_policy->act_cnt[i].rix_mark) {
			tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
					mtr_policy->act_cnt[i].rix_mark);
			if (!tag) {
				DRV_LOG(ERR, "Failed to find "
				"mark action for policy.");
				return -1;
			}
			acts[i].dv_actions[acts[i].actions_n] =
						tag->action;
			acts[i].actions_n++;
		}
		/* Optional header modification action. */
		if (mtr_policy->act_cnt[i].modify_hdr) {
			acts[i].dv_actions[acts[i].actions_n] =
			mtr_policy->act_cnt[i].modify_hdr->action;
			acts[i].actions_n++;
		}
		/* The fate action always goes last in the array. */
		if (mtr_policy->act_cnt[i].fate_action) {
			switch (mtr_policy->act_cnt[i].fate_action) {
			case MLX5_FLOW_FATE_PORT_ID:
				port_action = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
				mtr_policy->act_cnt[i].rix_port_id_action);
				if (!port_action) {
					DRV_LOG(ERR, "Failed to find "
						"port action for policy.");
					return -1;
				}
				acts[i].dv_actions[acts[i].actions_n] =
				port_action->action;
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_DROP:
			case MLX5_FLOW_FATE_JUMP:
				acts[i].dv_actions[acts[i].actions_n] =
				mtr_policy->act_cnt[i].dr_jump_action[domain];
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_SHARED_RSS:
			case MLX5_FLOW_FATE_QUEUE:
				/* Queue/RSS resolve through the sub-policy hrxq. */
				hrxq = mlx5_ipool_get
				(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				sub_policy->rix_hrxq[i]);
				if (!hrxq) {
					DRV_LOG(ERR, "Failed to find "
						"queue action for policy.");
					return -1;
				}
				acts[i].dv_actions[acts[i].actions_n] =
				hrxq->action;
				acts[i].actions_n++;
				break;
			default:
				/* Other fate kinds need no extra DV action. */
				break;
			}
		}
	}
	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
				egress, transfer, false, acts)) {
		DRV_LOG(ERR,
		"Failed to create policy rules per domain.");
		return -1;
	}
	return 0;
}
15599
15600 /**
15601  * Create the policy rules.
15602  *
15603  * @param[in] dev
15604  *   Pointer to Ethernet device.
15605  * @param[in,out] mtr_policy
15606  *   Pointer to meter policy table.
15607  *
15608  * @return
15609  *   0 on success, -1 otherwise.
15610  */
15611 static int
15612 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
15613                              struct mlx5_flow_meter_policy *mtr_policy)
15614 {
15615         int i;
15616         uint16_t sub_policy_num;
15617
15618         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15619                 sub_policy_num = (mtr_policy->sub_policy_num >>
15620                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15621                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15622                 if (!sub_policy_num)
15623                         continue;
15624                 /* Prepare actions list and create policy rules. */
15625                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
15626                         mtr_policy->sub_policys[i][0], i)) {
15627                         DRV_LOG(ERR,
15628                         "Failed to create policy action list per domain.");
15629                         return -1;
15630                 }
15631         }
15632         return 0;
15633 }
15634
/**
 * Create the default meter policy for one domain.
 *
 * Allocates the policy object, creates the meter suffix table and a jump
 * action to it for green traffic, creates (or reuses) the drop table with
 * a jump action for red traffic, then installs the default policy rules.
 * On any failure the partially built state is torn down via
 * __flow_dv_destroy_domain_def_policy().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] domain
 *   Meter domain to create the default policy for.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
	struct mlx5_flow_meter_def_policy *def_policy;
	struct mlx5_flow_tbl_resource *jump_tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	uint8_t egress, transfer;
	struct rte_flow_error error;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	int ret;

	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	def_policy = mtrmng->def_policy[domain];
	/* Idempotent: nothing to do when the policy already exists. */
	if (!def_policy) {
		def_policy = mlx5_malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_flow_meter_def_policy),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!def_policy) {
			DRV_LOG(ERR, "Failed to alloc "
					"default policy table.");
			goto def_policy_error;
		}
		/* Publish early so the error path can find and free it. */
		mtrmng->def_policy[domain] = def_policy;
		/* Create the meter suffix table with SUFFIX level. */
		jump_tbl = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
		if (!jump_tbl) {
			DRV_LOG(ERR,
				"Failed to create meter suffix table.");
			goto def_policy_error;
		}
		def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
		tbl_data = container_of(jump_tbl,
				struct mlx5_flow_tbl_data_entry, tbl);
		def_policy->dr_jump_action[RTE_COLOR_GREEN] =
						tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].dv_actions[0] =
						tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].actions_n = 1;
		/* Create jump action to the drop table. */
		if (!mtrmng->drop_tbl[domain]) {
			mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
				(dev, MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_DROP, &error);
			if (!mtrmng->drop_tbl[domain]) {
				DRV_LOG(ERR, "Failed to create "
				"meter drop table for default policy.");
				goto def_policy_error;
			}
		}
		tbl_data = container_of(mtrmng->drop_tbl[domain],
				struct mlx5_flow_tbl_data_entry, tbl);
		def_policy->dr_jump_action[RTE_COLOR_RED] =
						tbl_data->jump.action;
		acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_RED].actions_n = 1;
		/* Create default policy rules. */
		ret = __flow_dv_create_domain_policy_rules(dev,
					&def_policy->sub_policy,
					egress, transfer, true, acts);
		if (ret) {
			DRV_LOG(ERR, "Failed to create "
				"default policy rules.");
				goto def_policy_error;
		}
	}
	return 0;
def_policy_error:
	__flow_dv_destroy_domain_def_policy(dev,
			(enum mlx5_meter_domain)domain);
	return -1;
}
15713
15714 /**
15715  * Create the default policy table set.
15716  *
15717  * @param[in] dev
15718  *   Pointer to Ethernet device.
15719  * @return
15720  *   0 on success, -1 otherwise.
15721  */
15722 static int
15723 flow_dv_create_def_policy(struct rte_eth_dev *dev)
15724 {
15725         struct mlx5_priv *priv = dev->data->dev_private;
15726         int i;
15727
15728         /* Non-termination policy table. */
15729         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15730                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
15731                         continue;
15732                 if (__flow_dv_create_domain_def_policy(dev, i)) {
15733                         DRV_LOG(ERR,
15734                         "Failed to create default policy");
15735                         return -1;
15736                 }
15737         }
15738         return 0;
15739 }
15740
15741 /**
15742  * Create the needed meter tables.
15743  * Lock free, (mutex should be acquired by caller).
15744  *
15745  * @param[in] dev
15746  *   Pointer to Ethernet device.
15747  * @param[in] fm
15748  *   Meter information table.
15749  * @param[in] mtr_idx
15750  *   Meter index.
15751  * @param[in] domain_bitmap
15752  *   Domain bitmap.
15753  * @return
15754  *   0 on success, -1 otherwise.
15755  */
15756 static int
15757 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
15758                         struct mlx5_flow_meter_info *fm,
15759                         uint32_t mtr_idx,
15760                         uint8_t domain_bitmap)
15761 {
15762         struct mlx5_priv *priv = dev->data->dev_private;
15763         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15764         struct rte_flow_error error;
15765         struct mlx5_flow_tbl_data_entry *tbl_data;
15766         uint8_t egress, transfer;
15767         void *actions[METER_ACTIONS];
15768         int domain, ret, i;
15769         struct mlx5_flow_counter *cnt;
15770         struct mlx5_flow_dv_match_params value = {
15771                 .size = sizeof(value.buf) -
15772                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15773         };
15774         struct mlx5_flow_dv_match_params matcher_para = {
15775                 .size = sizeof(matcher_para.buf) -
15776                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15777         };
15778         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
15779                                                      0, &error);
15780         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
15781         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
15782         struct mlx5_cache_entry *entry;
15783         struct mlx5_flow_dv_matcher matcher = {
15784                 .mask = {
15785                         .size = sizeof(matcher.mask.buf) -
15786                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15787                 },
15788         };
15789         struct mlx5_flow_dv_matcher *drop_matcher;
15790         struct mlx5_flow_cb_ctx ctx = {
15791                 .error = &error,
15792                 .data = &matcher,
15793         };
15794
15795         if (!priv->mtr_en || mtr_id_reg_c < 0) {
15796                 rte_errno = ENOTSUP;
15797                 return -1;
15798         }
15799         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
15800                 if (!(domain_bitmap & (1 << domain)) ||
15801                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
15802                         continue;
15803                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15804                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15805                 /* Create the drop table with METER DROP level. */
15806                 if (!mtrmng->drop_tbl[domain]) {
15807                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
15808                                         MLX5_FLOW_TABLE_LEVEL_METER,
15809                                         egress, transfer, false, NULL, 0,
15810                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
15811                         if (!mtrmng->drop_tbl[domain]) {
15812                                 DRV_LOG(ERR, "Failed to create meter drop table.");
15813                                 goto policy_error;
15814                         }
15815                 }
15816                 /* Create default matcher in drop table. */
15817                 matcher.tbl = mtrmng->drop_tbl[domain],
15818                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15819                                 struct mlx5_flow_tbl_data_entry, tbl);
15820                 if (!mtrmng->def_matcher[domain]) {
15821                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15822                                        (enum modify_reg)mtr_id_reg_c,
15823                                        0, 0);
15824                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
15825                         matcher.crc = rte_raw_cksum
15826                                         ((const void *)matcher.mask.buf,
15827                                         matcher.mask.size);
15828                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15829                         if (!entry) {
15830                                 DRV_LOG(ERR, "Failed to register meter "
15831                                 "drop default matcher.");
15832                                 goto policy_error;
15833                         }
15834                         mtrmng->def_matcher[domain] = container_of(entry,
15835                         struct mlx5_flow_dv_matcher, entry);
15836                 }
15837                 /* Create default rule in drop table. */
15838                 if (!mtrmng->def_rule[domain]) {
15839                         i = 0;
15840                         actions[i++] = priv->sh->dr_drop_action;
15841                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15842                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
15843                         ret = mlx5_flow_os_create_flow
15844                                 (mtrmng->def_matcher[domain]->matcher_object,
15845                                 (void *)&value, i, actions,
15846                                 &mtrmng->def_rule[domain]);
15847                         if (ret) {
15848                                 DRV_LOG(ERR, "Failed to create meter "
15849                                 "default drop rule for drop table.");
15850                                 goto policy_error;
15851                         }
15852                 }
15853                 if (!fm->drop_cnt)
15854                         continue;
15855                 MLX5_ASSERT(mtrmng->max_mtr_bits);
15856                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
15857                         /* Create matchers for Drop. */
15858                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15859                                         (enum modify_reg)mtr_id_reg_c, 0,
15860                                         (mtr_id_mask << mtr_id_offset));
15861                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
15862                         matcher.crc = rte_raw_cksum
15863                                         ((const void *)matcher.mask.buf,
15864                                         matcher.mask.size);
15865                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15866                         if (!entry) {
15867                                 DRV_LOG(ERR,
15868                                 "Failed to register meter drop matcher.");
15869                                 goto policy_error;
15870                         }
15871                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
15872                                 container_of(entry, struct mlx5_flow_dv_matcher,
15873                                              entry);
15874                 }
15875                 drop_matcher =
15876                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
15877                 /* Create drop rule, matching meter_id only. */
15878                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15879                                 (enum modify_reg)mtr_id_reg_c,
15880                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
15881                 i = 0;
15882                 cnt = flow_dv_counter_get_by_idx(dev,
15883                                         fm->drop_cnt, NULL);
15884                 actions[i++] = cnt->action;
15885                 actions[i++] = priv->sh->dr_drop_action;
15886                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
15887                                                (void *)&value, i, actions,
15888                                                &fm->drop_rule[domain]);
15889                 if (ret) {
15890                         DRV_LOG(ERR, "Failed to create meter "
15891                                 "drop rule for drop table.");
15892                                 goto policy_error;
15893                 }
15894         }
15895         return 0;
15896 policy_error:
15897         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15898                 if (fm->drop_rule[i]) {
15899                         claim_zero(mlx5_flow_os_destroy_flow
15900                                 (fm->drop_rule[i]));
15901                         fm->drop_rule[i] = NULL;
15902                 }
15903         }
15904         return -1;
15905 }
15906
15907 /**
15908  * Find the policy table for prefix table with RSS.
15909  *
15910  * @param[in] dev
15911  *   Pointer to Ethernet device.
15912  * @param[in] mtr_policy
15913  *   Pointer to meter policy table.
15914  * @param[in] rss_desc
15915  *   Pointer to rss_desc
15916  * @return
15917  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
15918  */
15919 static struct mlx5_flow_meter_sub_policy *
15920 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
15921                 struct mlx5_flow_meter_policy *mtr_policy,
15922                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
15923 {
15924         struct mlx5_priv *priv = dev->data->dev_private;
15925         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
15926         uint32_t sub_policy_idx = 0;
15927         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
15928         uint32_t i, j;
15929         struct mlx5_hrxq *hrxq;
15930         struct mlx5_flow_handle dh;
15931         struct mlx5_meter_policy_action_container *act_cnt;
15932         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
15933         uint16_t sub_policy_num;
15934
15935         rte_spinlock_lock(&mtr_policy->sl);
15936         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15937                 if (!rss_desc[i])
15938                         continue;
15939                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
15940                 if (!hrxq_idx[i]) {
15941                         rte_spinlock_unlock(&mtr_policy->sl);
15942                         return NULL;
15943                 }
15944         }
15945         sub_policy_num = (mtr_policy->sub_policy_num >>
15946                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
15947                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15948         for (i = 0; i < sub_policy_num;
15949                 i++) {
15950                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
15951                         if (rss_desc[j] &&
15952                                 hrxq_idx[j] !=
15953                         mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
15954                                 break;
15955                 }
15956                 if (j >= MLX5_MTR_RTE_COLORS) {
15957                         /*
15958                          * Found the sub policy table with
15959                          * the same queue per color
15960                          */
15961                         rte_spinlock_unlock(&mtr_policy->sl);
15962                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
15963                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
15964                         return mtr_policy->sub_policys[domain][i];
15965                 }
15966         }
15967         /* Create sub policy. */
15968         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
15969                 /* Reuse the first dummy sub_policy*/
15970                 sub_policy = mtr_policy->sub_policys[domain][0];
15971                 sub_policy_idx = sub_policy->idx;
15972         } else {
15973                 sub_policy = mlx5_ipool_zmalloc
15974                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
15975                                 &sub_policy_idx);
15976                 if (!sub_policy ||
15977                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
15978                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
15979                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
15980                         goto rss_sub_policy_error;
15981                 }
15982                 sub_policy->idx = sub_policy_idx;
15983                 sub_policy->main_policy = mtr_policy;
15984         }
15985         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15986                 if (!rss_desc[i])
15987                         continue;
15988                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
15989                 /*
15990                  * Overwrite the last action from
15991                  * RSS action to Queue action.
15992                  */
15993                 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
15994                               hrxq_idx[i]);
15995                 if (!hrxq) {
15996                         DRV_LOG(ERR, "Failed to create policy hrxq");
15997                         goto rss_sub_policy_error;
15998                 }
15999                 act_cnt = &mtr_policy->act_cnt[i];
16000                 if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16001                         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16002                         if (act_cnt->rix_mark)
16003                                 dh.mark = 1;
16004                         dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16005                         dh.rix_hrxq = hrxq_idx[i];
16006                         flow_drv_rxq_flags_set(dev, &dh);
16007                 }
16008         }
16009         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16010                 sub_policy, domain)) {
16011                 DRV_LOG(ERR, "Failed to create policy "
16012                         "rules per domain.");
16013                 goto rss_sub_policy_error;
16014         }
16015         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16016                 i = (mtr_policy->sub_policy_num >>
16017                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16018                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16019                 mtr_policy->sub_policys[domain][i] = sub_policy;
16020                 i++;
16021                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
16022                         goto rss_sub_policy_error;
16023                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16024                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16025                 mtr_policy->sub_policy_num |=
16026                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16027                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16028         }
16029         rte_spinlock_unlock(&mtr_policy->sl);
16030         return sub_policy;
16031 rss_sub_policy_error:
16032         if (sub_policy) {
16033                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16034                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16035                         i = (mtr_policy->sub_policy_num >>
16036                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16037                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16038                         mtr_policy->sub_policys[domain][i] = NULL;
16039                         mlx5_ipool_free
16040                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16041                                         sub_policy->idx);
16042                 }
16043         }
16044         if (sub_policy_idx)
16045                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16046                         sub_policy_idx);
16047         rte_spinlock_unlock(&mtr_policy->sl);
16048         return NULL;
16049 }
16050
16051
16052 /**
16053  * Destroy the sub policy table with RX queue.
16054  *
16055  * @param[in] dev
16056  *   Pointer to Ethernet device.
16057  * @param[in] mtr_policy
16058  *   Pointer to meter policy table.
16059  */
16060 static void
16061 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16062                 struct mlx5_flow_meter_policy *mtr_policy)
16063 {
16064         struct mlx5_priv *priv = dev->data->dev_private;
16065         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16066         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16067         uint32_t i, j;
16068         uint16_t sub_policy_num, new_policy_num;
16069
16070         rte_spinlock_lock(&mtr_policy->sl);
16071         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16072                 switch (mtr_policy->act_cnt[i].fate_action) {
16073                 case MLX5_FLOW_FATE_SHARED_RSS:
16074                         sub_policy_num = (mtr_policy->sub_policy_num >>
16075                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16076                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16077                         new_policy_num = sub_policy_num;
16078                         for (j = 0; j < sub_policy_num; j++) {
16079                                 sub_policy =
16080                                         mtr_policy->sub_policys[domain][j];
16081                                 if (sub_policy) {
16082                                         __flow_dv_destroy_sub_policy_rules(dev,
16083                                                 sub_policy);
16084                                 if (sub_policy !=
16085                                         mtr_policy->sub_policys[domain][0]) {
16086                                         mtr_policy->sub_policys[domain][j] =
16087                                                                 NULL;
16088                                         mlx5_ipool_free
16089                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16090                                                 sub_policy->idx);
16091                                                 new_policy_num--;
16092                                         }
16093                                 }
16094                         }
16095                         if (new_policy_num != sub_policy_num) {
16096                                 mtr_policy->sub_policy_num &=
16097                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16098                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16099                                 mtr_policy->sub_policy_num |=
16100                                 (new_policy_num &
16101                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16102                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16103                         }
16104                         break;
16105                 case MLX5_FLOW_FATE_QUEUE:
16106                         sub_policy = mtr_policy->sub_policys[domain][0];
16107                         __flow_dv_destroy_sub_policy_rules(dev,
16108                                                 sub_policy);
16109                         break;
16110                 default:
16111                         /*Other actions without queue and do nothing*/
16112                         break;
16113                 }
16114         }
16115         rte_spinlock_unlock(&mtr_policy->sl);
16116 }
16117
16118 /**
16119  * Validate the batch counter support in root table.
16120  *
16121  * Create a simple flow with invalid counter and drop action on root table to
16122  * validate if batch counter with offset on root table is supported or not.
16123  *
16124  * @param[in] dev
16125  *   Pointer to rte_eth_dev structure.
16126  *
16127  * @return
16128  *   0 on success, a negative errno value otherwise and rte_errno is set.
16129  */
16130 int
16131 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16132 {
16133         struct mlx5_priv *priv = dev->data->dev_private;
16134         struct mlx5_dev_ctx_shared *sh = priv->sh;
16135         struct mlx5_flow_dv_match_params mask = {
16136                 .size = sizeof(mask.buf),
16137         };
16138         struct mlx5_flow_dv_match_params value = {
16139                 .size = sizeof(value.buf),
16140         };
16141         struct mlx5dv_flow_matcher_attr dv_attr = {
16142                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16143                 .priority = 0,
16144                 .match_criteria_enable = 0,
16145                 .match_mask = (void *)&mask,
16146         };
16147         void *actions[2] = { 0 };
16148         struct mlx5_flow_tbl_resource *tbl = NULL;
16149         struct mlx5_devx_obj *dcs = NULL;
16150         void *matcher = NULL;
16151         void *flow = NULL;
16152         int ret = -1;
16153
16154         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16155                                         0, 0, 0, NULL);
16156         if (!tbl)
16157                 goto err;
16158         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16159         if (!dcs)
16160                 goto err;
16161         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16162                                                     &actions[0]);
16163         if (ret)
16164                 goto err;
16165         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
16166         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
16167                                                &matcher);
16168         if (ret)
16169                 goto err;
16170         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
16171                                        actions, &flow);
16172 err:
16173         /*
16174          * If batch counter with offset is not supported, the driver will not
16175          * validate the invalid offset value, flow create should success.
16176          * In this case, it means batch counter is not supported in root table.
16177          *
16178          * Otherwise, if flow create is failed, counter offset is supported.
16179          */
16180         if (flow) {
16181                 DRV_LOG(INFO, "Batch counter is not supported in root "
16182                               "table. Switch to fallback mode.");
16183                 rte_errno = ENOTSUP;
16184                 ret = -rte_errno;
16185                 claim_zero(mlx5_flow_os_destroy_flow(flow));
16186         } else {
16187                 /* Check matcher to make sure validate fail at flow create. */
16188                 if (!matcher || (matcher && errno != EINVAL))
16189                         DRV_LOG(ERR, "Unexpected error in counter offset "
16190                                      "support detection");
16191                 ret = 0;
16192         }
16193         if (actions[0])
16194                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
16195         if (matcher)
16196                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
16197         if (tbl)
16198                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
16199         if (dcs)
16200                 claim_zero(mlx5_devx_cmd_destroy(dcs));
16201         return ret;
16202 }
16203
16204 /**
16205  * Query a devx counter.
16206  *
16207  * @param[in] dev
16208  *   Pointer to the Ethernet device structure.
16209  * @param[in] cnt
16210  *   Index to the flow counter.
16211  * @param[in] clear
16212  *   Set to clear the counter statistics.
16213  * @param[out] pkts
16214  *   The statistics value of packets.
16215  * @param[out] bytes
16216  *   The statistics value of bytes.
16217  *
16218  * @return
16219  *   0 on success, otherwise return -1.
16220  */
16221 static int
16222 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
16223                       uint64_t *pkts, uint64_t *bytes)
16224 {
16225         struct mlx5_priv *priv = dev->data->dev_private;
16226         struct mlx5_flow_counter *cnt;
16227         uint64_t inn_pkts, inn_bytes;
16228         int ret;
16229
16230         if (!priv->config.devx)
16231                 return -1;
16232
16233         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
16234         if (ret)
16235                 return -1;
16236         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
16237         *pkts = inn_pkts - cnt->hits;
16238         *bytes = inn_bytes - cnt->bytes;
16239         if (clear) {
16240                 cnt->hits = inn_pkts;
16241                 cnt->bytes = inn_bytes;
16242         }
16243         return 0;
16244 }
16245
/**
 * Get aged-out flows.
 *
 * Collects contexts of aged-out flows from both the ASO age-action list and
 * the aged-counter list, then re-arms the age event trigger.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flows contexts.
 * @param[in] nb_contexts
 *   The length of context array pointers.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   how many contexts get in success, otherwise negative errno value.
 *   if nb_contexts is 0, return the amount of all aged contexts.
 *   if nb_contexts is not 0 , return the amount of aged flows reported
 *   in the context array.
 */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	struct mlx5_aso_age_action *act;
	int nb_flows = 0;

	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "empty context");
	age_info = GET_PORT_AGE_INFO(priv);
	rte_spinlock_lock(&age_info->aged_sl);
	/* First, flows aged out via ASO age actions. */
	LIST_FOREACH(act, &age_info->aged_aso, next) {
		nb_flows++;
		if (nb_contexts) {
			context[nb_flows - 1] =
						act->age_params.context;
			if (!(--nb_contexts))
				break;
		}
	}
	/* Then, flows aged out via counter-based aging. */
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	/* Re-arm so that subsequent age events fire again. */
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}
16307
/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 *
 * Allocates a non-aging counter (age = 0); returns the counter index,
 * or 0 on failure (flow_dv_counter_alloc() sets rte_errno).
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	return flow_dv_counter_alloc(dev, 0);
}
16316
16317 /**
16318  * Validate indirect action.
16319  * Dispatcher for action type specific validation.
16320  *
16321  * @param[in] dev
16322  *   Pointer to the Ethernet device structure.
16323  * @param[in] conf
16324  *   Indirect action configuration.
16325  * @param[in] action
16326  *   The indirect action object to validate.
16327  * @param[out] error
16328  *   Perform verbose error reporting if not NULL. Initialized in case of
16329  *   error only.
16330  *
16331  * @return
16332  *   0 on success, otherwise negative errno value.
16333  */
16334 static int
16335 flow_dv_action_validate(struct rte_eth_dev *dev,
16336                         const struct rte_flow_indir_action_conf *conf,
16337                         const struct rte_flow_action *action,
16338                         struct rte_flow_error *err)
16339 {
16340         struct mlx5_priv *priv = dev->data->dev_private;
16341
16342         RTE_SET_USED(conf);
16343         switch (action->type) {
16344         case RTE_FLOW_ACTION_TYPE_RSS:
16345                 /*
16346                  * priv->obj_ops is set according to driver capabilities.
16347                  * When DevX capabilities are
16348                  * sufficient, it is set to devx_obj_ops.
16349                  * Otherwise, it is set to ibv_obj_ops.
16350                  * ibv_obj_ops doesn't support ind_table_modify operation.
16351                  * In this case the indirect RSS action can't be used.
16352                  */
16353                 if (priv->obj_ops.ind_table_modify == NULL)
16354                         return rte_flow_error_set
16355                                         (err, ENOTSUP,
16356                                          RTE_FLOW_ERROR_TYPE_ACTION,
16357                                          NULL,
16358                                          "Indirect RSS action not supported");
16359                 return mlx5_validate_action_rss(dev, action, err);
16360         case RTE_FLOW_ACTION_TYPE_AGE:
16361                 if (!priv->sh->aso_age_mng)
16362                         return rte_flow_error_set(err, ENOTSUP,
16363                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16364                                                 NULL,
16365                                                 "Indirect age action not supported");
16366                 return flow_dv_validate_action_age(0, action, dev, err);
16367         case RTE_FLOW_ACTION_TYPE_COUNT:
16368                 /*
16369                  * There are two mechanisms to share the action count.
16370                  * The old mechanism uses the shared field to share, while the
16371                  * new mechanism uses the indirect action API.
16372                  * This validation comes to make sure that the two mechanisms
16373                  * are not combined.
16374                  */
16375                 if (is_shared_action_count(action))
16376                         return rte_flow_error_set(err, ENOTSUP,
16377                                                   RTE_FLOW_ERROR_TYPE_ACTION,
16378                                                   NULL,
16379                                                   "Mix shared and indirect counter is not supported");
16380                 return flow_dv_validate_action_count(dev, true, 0, err);
16381         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
16382                 if (!priv->sh->ct_aso_en)
16383                         return rte_flow_error_set(err, ENOTSUP,
16384                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16385                                         "ASO CT is not supported");
16386                 return mlx5_validate_action_ct(dev, action->conf, err);
16387         default:
16388                 return rte_flow_error_set(err, ENOTSUP,
16389                                           RTE_FLOW_ERROR_TYPE_ACTION,
16390                                           NULL,
16391                                           "action type not supported");
16392         }
16393 }
16394
16395 /**
16396  * Validate meter policy actions.
16397  * Dispatcher for action type specific validation.
16398  *
16399  * @param[in] dev
16400  *   Pointer to the Ethernet device structure.
16401  * @param[in] action
16402  *   The meter policy action object to validate.
16403  * @param[in] attr
16404  *   Attributes of flow to determine steering domain.
16405  * @param[out] error
16406  *   Perform verbose error reporting if not NULL. Initialized in case of
16407  *   error only.
16408  *
16409  * @return
16410  *   0 on success, otherwise negative errno value.
16411  */
16412 static int
16413 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
16414                         const struct rte_flow_action *actions[RTE_COLORS],
16415                         struct rte_flow_attr *attr,
16416                         bool *is_rss,
16417                         uint8_t *domain_bitmap,
16418                         bool *is_def_policy,
16419                         struct rte_mtr_error *error)
16420 {
16421         struct mlx5_priv *priv = dev->data->dev_private;
16422         struct mlx5_dev_config *dev_conf = &priv->config;
16423         const struct rte_flow_action *act;
16424         uint64_t action_flags = 0;
16425         int actions_n;
16426         int i, ret;
16427         struct rte_flow_error flow_err;
16428         uint8_t domain_color[RTE_COLORS] = {0};
16429         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
16430
16431         if (!priv->config.dv_esw_en)
16432                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
16433         *domain_bitmap = def_domain;
16434         if (actions[RTE_COLOR_YELLOW] &&
16435                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
16436                 return -rte_mtr_error_set(error, ENOTSUP,
16437                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16438                                 NULL,
16439                                 "Yellow color does not support any action.");
16440         if (actions[RTE_COLOR_YELLOW] &&
16441                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_DROP)
16442                 return -rte_mtr_error_set(error, ENOTSUP,
16443                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16444                                 NULL, "Red color only supports drop action.");
16445         /*
16446          * Check default policy actions:
16447          * Green/Yellow: no action, Red: drop action
16448          */
16449         if ((!actions[RTE_COLOR_GREEN] ||
16450                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
16451                 *is_def_policy = true;
16452                 return 0;
16453         }
16454         flow_err.message = NULL;
16455         for (i = 0; i < RTE_COLORS; i++) {
16456                 act = actions[i];
16457                 for (action_flags = 0, actions_n = 0;
16458                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
16459                         act++) {
16460                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
16461                                 return -rte_mtr_error_set(error, ENOTSUP,
16462                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16463                                           NULL, "too many actions");
16464                         switch (act->type) {
16465                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
16466                                 if (!priv->config.dv_esw_en)
16467                                         return -rte_mtr_error_set(error,
16468                                         ENOTSUP,
16469                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16470                                         NULL, "PORT action validate check"
16471                                         " fail for ESW disable");
16472                                 ret = flow_dv_validate_action_port_id(dev,
16473                                                 action_flags,
16474                                                 act, attr, &flow_err);
16475                                 if (ret)
16476                                         return -rte_mtr_error_set(error,
16477                                         ENOTSUP,
16478                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16479                                         NULL, flow_err.message ?
16480                                         flow_err.message :
16481                                         "PORT action validate check fail");
16482                                 ++actions_n;
16483                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
16484                                 break;
16485                         case RTE_FLOW_ACTION_TYPE_MARK:
16486                                 ret = flow_dv_validate_action_mark(dev, act,
16487                                                            action_flags,
16488                                                            attr, &flow_err);
16489                                 if (ret < 0)
16490                                         return -rte_mtr_error_set(error,
16491                                         ENOTSUP,
16492                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16493                                         NULL, flow_err.message ?
16494                                         flow_err.message :
16495                                         "Mark action validate check fail");
16496                                 if (dev_conf->dv_xmeta_en !=
16497                                         MLX5_XMETA_MODE_LEGACY)
16498                                         return -rte_mtr_error_set(error,
16499                                         ENOTSUP,
16500                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16501                                         NULL, "Extend MARK action is "
16502                                         "not supported. Please try use "
16503                                         "default policy for meter.");
16504                                 action_flags |= MLX5_FLOW_ACTION_MARK;
16505                                 ++actions_n;
16506                                 break;
16507                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
16508                                 ret = flow_dv_validate_action_set_tag(dev,
16509                                                         act, action_flags,
16510                                                         attr, &flow_err);
16511                                 if (ret)
16512                                         return -rte_mtr_error_set(error,
16513                                         ENOTSUP,
16514                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16515                                         NULL, flow_err.message ?
16516                                         flow_err.message :
16517                                         "Set tag action validate check fail");
16518                                 /*
16519                                  * Count all modify-header actions
16520                                  * as one action.
16521                                  */
16522                                 if (!(action_flags &
16523                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
16524                                         ++actions_n;
16525                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
16526                                 break;
16527                         case RTE_FLOW_ACTION_TYPE_DROP:
16528                                 ret = mlx5_flow_validate_action_drop
16529                                         (action_flags,
16530                                         attr, &flow_err);
16531                                 if (ret < 0)
16532                                         return -rte_mtr_error_set(error,
16533                                         ENOTSUP,
16534                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16535                                         NULL, flow_err.message ?
16536                                         flow_err.message :
16537                                         "Drop action validate check fail");
16538                                 action_flags |= MLX5_FLOW_ACTION_DROP;
16539                                 ++actions_n;
16540                                 break;
16541                         case RTE_FLOW_ACTION_TYPE_QUEUE:
16542                                 /*
16543                                  * Check whether extensive
16544                                  * metadata feature is engaged.
16545                                  */
16546                                 if (dev_conf->dv_flow_en &&
16547                                         (dev_conf->dv_xmeta_en !=
16548                                         MLX5_XMETA_MODE_LEGACY) &&
16549                                         mlx5_flow_ext_mreg_supported(dev))
16550                                         return -rte_mtr_error_set(error,
16551                                           ENOTSUP,
16552                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16553                                           NULL, "Queue action with meta "
16554                                           "is not supported. Please try use "
16555                                           "default policy for meter.");
16556                                 ret = mlx5_flow_validate_action_queue(act,
16557                                                         action_flags, dev,
16558                                                         attr, &flow_err);
16559                                 if (ret < 0)
16560                                         return -rte_mtr_error_set(error,
16561                                           ENOTSUP,
16562                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16563                                           NULL, flow_err.message ?
16564                                           flow_err.message :
16565                                           "Queue action validate check fail");
16566                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
16567                                 ++actions_n;
16568                                 break;
16569                         case RTE_FLOW_ACTION_TYPE_RSS:
16570                                 if (dev_conf->dv_flow_en &&
16571                                         (dev_conf->dv_xmeta_en !=
16572                                         MLX5_XMETA_MODE_LEGACY) &&
16573                                         mlx5_flow_ext_mreg_supported(dev))
16574                                         return -rte_mtr_error_set(error,
16575                                           ENOTSUP,
16576                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16577                                           NULL, "RSS action with meta "
16578                                           "is not supported. Please try use "
16579                                           "default policy for meter.");
16580                                 ret = mlx5_validate_action_rss(dev, act,
16581                                                 &flow_err);
16582                                 if (ret < 0)
16583                                         return -rte_mtr_error_set(error,
16584                                           ENOTSUP,
16585                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16586                                           NULL, flow_err.message ?
16587                                           flow_err.message :
16588                                           "RSS action validate check fail");
16589                                 action_flags |= MLX5_FLOW_ACTION_RSS;
16590                                 ++actions_n;
16591                                 *is_rss = true;
16592                                 break;
16593                         case RTE_FLOW_ACTION_TYPE_JUMP:
16594                                 ret = flow_dv_validate_action_jump(dev,
16595                                         NULL, act, action_flags,
16596                                         attr, true, &flow_err);
16597                                 if (ret)
16598                                         return -rte_mtr_error_set(error,
16599                                           ENOTSUP,
16600                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
16601                                           NULL, flow_err.message ?
16602                                           flow_err.message :
16603                                           "Jump action validate check fail");
16604                                 ++actions_n;
16605                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
16606                                 break;
16607                         default:
16608                                 return -rte_mtr_error_set(error, ENOTSUP,
16609                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16610                                         NULL,
16611                                         "Doesn't support optional action");
16612                         }
16613                 }
16614                 /* Yellow is not supported, just skip. */
16615                 if (i == RTE_COLOR_YELLOW)
16616                         continue;
16617                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
16618                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
16619                 else if ((action_flags &
16620                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
16621                         (action_flags & MLX5_FLOW_ACTION_MARK))
16622                         /*
16623                          * Only support MLX5_XMETA_MODE_LEGACY
16624                          * so MARK action only in ingress domain.
16625                          */
16626                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
16627                 else
16628                         domain_color[i] = def_domain;
16629                 /*
16630                  * Validate the drop action mutual exclusion
16631                  * with other actions. Drop action is mutually-exclusive
16632                  * with any other action, except for Count action.
16633                  */
16634                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
16635                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
16636                         return -rte_mtr_error_set(error, ENOTSUP,
16637                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
16638                                 NULL, "Drop action is mutually-exclusive "
16639                                 "with any other action");
16640                 }
16641                 /* Eswitch has few restrictions on using items and actions */
16642                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
16643                         if (!mlx5_flow_ext_mreg_supported(dev) &&
16644                                 action_flags & MLX5_FLOW_ACTION_MARK)
16645                                 return -rte_mtr_error_set(error, ENOTSUP,
16646                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16647                                         NULL, "unsupported action MARK");
16648                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
16649                                 return -rte_mtr_error_set(error, ENOTSUP,
16650                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16651                                         NULL, "unsupported action QUEUE");
16652                         if (action_flags & MLX5_FLOW_ACTION_RSS)
16653                                 return -rte_mtr_error_set(error, ENOTSUP,
16654                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16655                                         NULL, "unsupported action RSS");
16656                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
16657                                 return -rte_mtr_error_set(error, ENOTSUP,
16658                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16659                                         NULL, "no fate action is found");
16660                 } else {
16661                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
16662                                 (domain_color[i] &
16663                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
16664                                 if ((domain_color[i] &
16665                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
16666                                         domain_color[i] =
16667                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
16668                                 else
16669                                         return -rte_mtr_error_set(error,
16670                                         ENOTSUP,
16671                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
16672                                         NULL, "no fate action is found");
16673                         }
16674                 }
16675                 if (domain_color[i] != def_domain)
16676                         *domain_bitmap = domain_color[i];
16677         }
16678         return 0;
16679 }
16680
16681 static int
16682 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
16683 {
16684         struct mlx5_priv *priv = dev->data->dev_private;
16685         int ret = 0;
16686
16687         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
16688                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
16689                                                 flags);
16690                 if (ret != 0)
16691                         return ret;
16692         }
16693         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
16694                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
16695                 if (ret != 0)
16696                         return ret;
16697         }
16698         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
16699                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
16700                 if (ret != 0)
16701                         return ret;
16702         }
16703         return 0;
16704 }
16705
/*
 * DV (Direct Verbs/Rules) flow engine driver ops table: binds the
 * generic mlx5 flow/meter/indirect-action entry points to their DV
 * implementations defined in this file.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	/* Meter table and policy management. */
	.create_mtr_tbls = flow_dv_create_mtr_tbls,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
	.destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
	.create_meter = flow_dv_mtr_alloc,
	.free_meter = flow_dv_aso_mtr_release_to_pool,
	.validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
	.create_mtr_acts = flow_dv_create_mtr_policy_acts,
	.destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
	.create_policy_rules = flow_dv_create_policy_rules,
	.destroy_policy_rules = flow_dv_destroy_policy_rules,
	.create_def_policy = flow_dv_create_def_policy,
	.destroy_def_policy = flow_dv_destroy_def_policy,
	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
	.destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
	/* Counters and flow aging. */
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
	/* Indirect (shared) actions. */
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	.sync_domain = flow_dv_sync_domain,
};
16739
16740 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
16741