1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24 #include <rte_mtr.h>
25 #include <rte_mtr_driver.h>
26
27 #include <mlx5_glue.h>
28 #include <mlx5_devx_cmds.h>
29 #include <mlx5_prm.h>
30 #include <mlx5_malloc.h>
31
32 #include "mlx5_defs.h"
33 #include "mlx5.h"
34 #include "mlx5_common_os.h"
35 #include "mlx5_flow.h"
36 #include "mlx5_flow_os.h"
37 #include "mlx5_rx.h"
38 #include "mlx5_tx.h"
39 #include "rte_pmd_mlx5.h"
40
41 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
42
43 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
44 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
45 #endif
46
47 #ifndef HAVE_MLX5DV_DR_ESWITCH
48 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
49 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
50 #endif
51 #endif
52
53 #ifndef HAVE_MLX5DV_DR
54 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
55 #endif
56
57 /* VLAN header definitions */
58 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
59 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
60 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
61 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
62 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
63
64 union flow_dv_attr {
65         struct {
66                 uint32_t valid:1;
67                 uint32_t ipv4:1;
68                 uint32_t ipv6:1;
69                 uint32_t tcp:1;
70                 uint32_t udp:1;
71                 uint32_t reserved:27;
72         };
73         uint32_t attr;
74 };
75
76 static int
77 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
78                              struct mlx5_flow_tbl_resource *tbl);
79
80 static int
81 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
82                                      uint32_t encap_decap_idx);
83
84 static int
85 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
86                                         uint32_t port_id);
87 static void
88 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
89
90 static int
91 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
92                                   uint32_t rix_jump);
93
94 /**
95  * Initialize flow attributes structure according to flow items' types.
96  *
97  * flow_dv_validate() rejects multiple L3/L4 layers except in tunnel
98  * mode. In tunnel mode, the items to be modified are the outermost ones.
99  *
100  * @param[in] item
101  *   Pointer to item specification.
102  * @param[out] attr
103  *   Pointer to flow attributes structure.
104  * @param[in] dev_flow
105  *   Pointer to the sub flow.
106  * @param[in] tunnel_decap
107  *   Whether action is after tunnel decapsulation.
108  */
109 static void
110 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
111                   struct mlx5_flow *dev_flow, bool tunnel_decap)
112 {
113         uint64_t layers = dev_flow->handle->layers;
114
115         /*
116          * If layers is already initialized, this dev_flow is the suffix
117          * flow and the layer flags were set by the prefix flow. Use the
118          * prefix flow's layer flags, as the suffix flow may not carry
119          * the user-defined items once the flow is split.
120          */
121         if (layers) {
122                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
123                         attr->ipv4 = 1;
124                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
125                         attr->ipv6 = 1;
126                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
127                         attr->tcp = 1;
128                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
129                         attr->udp = 1;
130                 attr->valid = 1;
131                 return;
132         }
133         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
134                 uint8_t next_protocol = 0xff;
135                 switch (item->type) {
136                 case RTE_FLOW_ITEM_TYPE_GRE:
137                 case RTE_FLOW_ITEM_TYPE_NVGRE:
138                 case RTE_FLOW_ITEM_TYPE_VXLAN:
139                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
140                 case RTE_FLOW_ITEM_TYPE_GENEVE:
141                 case RTE_FLOW_ITEM_TYPE_MPLS:
142                         if (tunnel_decap)
143                                 attr->attr = 0;
144                         break;
145                 case RTE_FLOW_ITEM_TYPE_IPV4:
146                         if (!attr->ipv6)
147                                 attr->ipv4 = 1;
148                         if (item->mask != NULL &&
149                             ((const struct rte_flow_item_ipv4 *)
150                             item->mask)->hdr.next_proto_id)
151                                 next_protocol =
152                                     ((const struct rte_flow_item_ipv4 *)
153                                       (item->spec))->hdr.next_proto_id &
154                                     ((const struct rte_flow_item_ipv4 *)
155                                       (item->mask))->hdr.next_proto_id;
156                         if ((next_protocol == IPPROTO_IPIP ||
157                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
158                                 attr->attr = 0;
159                         break;
160                 case RTE_FLOW_ITEM_TYPE_IPV6:
161                         if (!attr->ipv4)
162                                 attr->ipv6 = 1;
163                         if (item->mask != NULL &&
164                             ((const struct rte_flow_item_ipv6 *)
165                             item->mask)->hdr.proto)
166                                 next_protocol =
167                                     ((const struct rte_flow_item_ipv6 *)
168                                       (item->spec))->hdr.proto &
169                                     ((const struct rte_flow_item_ipv6 *)
170                                       (item->mask))->hdr.proto;
171                         if ((next_protocol == IPPROTO_IPIP ||
172                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
173                                 attr->attr = 0;
174                         break;
175                 case RTE_FLOW_ITEM_TYPE_UDP:
176                         if (!attr->tcp)
177                                 attr->udp = 1;
178                         break;
179                 case RTE_FLOW_ITEM_TYPE_TCP:
180                         if (!attr->udp)
181                                 attr->tcp = 1;
182                         break;
183                 default:
184                         break;
185                 }
186         }
187         attr->valid = 1;
188 }
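/*
 * Worked example (illustrative): with tunnel_decap == false and no
 * prefix-flow layers, a pattern of eth / ipv4 / udp walks the loop
 * above and yields:
 *
 *     union flow_dv_attr attr = { .attr = 0 };
 *     // RTE_FLOW_ITEM_TYPE_IPV4: attr.ipv4 = 1 (ipv6 is not set)
 *     // RTE_FLOW_ITEM_TYPE_UDP:  attr.udp = 1 (tcp is not set)
 *     // after the loop:          attr.valid = 1
 *
 * i.e. attr.attr == 0x13, assuming the compiler allocates the
 * bit-fields from the least significant bit up.
 */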
189
190 /**
191  * Convert rte_mtr_color to mlx5 color.
192  *
193  * @param[in] rcol
194  *   rte_mtr_color.
195  *
196  * @return
197  *   mlx5 color.
198  */
199 static int
200 rte_col_2_mlx5_col(enum rte_color rcol)
201 {
202         switch (rcol) {
203         case RTE_COLOR_GREEN:
204                 return MLX5_FLOW_COLOR_GREEN;
205         case RTE_COLOR_YELLOW:
206                 return MLX5_FLOW_COLOR_YELLOW;
207         case RTE_COLOR_RED:
208                 return MLX5_FLOW_COLOR_RED;
209         default:
210                 break;
211         }
212         return MLX5_FLOW_COLOR_UNDEFINED;
213 }
214
215 struct field_modify_info {
216         uint32_t size; /* Size of field in protocol header, in bytes. */
217         uint32_t offset; /* Offset of field in protocol header, in bytes. */
218         enum mlx5_modification_field id;
219 };
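/*
 * Reading the tables below (illustrative note): each entry maps one
 * protocol-header field to a HW modification id. For example, in
 * modify_ipv4 the entry {1, 8, MLX5_MODI_OUT_IPV4_TTL} describes a
 * 1-byte field at byte offset 8 of the IPv4 header (the TTL octet).
 * A {0, 0, 0} entry terminates each table. The only exception is
 * modify_vlan_out_first_vid, whose size is expressed in bits.
 */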
220
221 struct field_modify_info modify_eth[] = {
222         {4,  0, MLX5_MODI_OUT_DMAC_47_16},
223         {2,  4, MLX5_MODI_OUT_DMAC_15_0},
224         {4,  6, MLX5_MODI_OUT_SMAC_47_16},
225         {2, 10, MLX5_MODI_OUT_SMAC_15_0},
226         {0, 0, 0},
227 };
228
229 struct field_modify_info modify_vlan_out_first_vid[] = {
230         /* Size is in bits here, unlike the other tables! */
231         {12, 0, MLX5_MODI_OUT_FIRST_VID},
232         {0, 0, 0},
233 };
234
235 struct field_modify_info modify_ipv4[] = {
236         {1,  1, MLX5_MODI_OUT_IP_DSCP},
237         {1,  8, MLX5_MODI_OUT_IPV4_TTL},
238         {4, 12, MLX5_MODI_OUT_SIPV4},
239         {4, 16, MLX5_MODI_OUT_DIPV4},
240         {0, 0, 0},
241 };
242
243 struct field_modify_info modify_ipv6[] = {
244         {1,  0, MLX5_MODI_OUT_IP_DSCP},
245         {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
246         {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
247         {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
248         {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
249         {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
250         {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
251         {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
252         {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
253         {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
254         {0, 0, 0},
255 };
256
257 struct field_modify_info modify_udp[] = {
258         {2, 0, MLX5_MODI_OUT_UDP_SPORT},
259         {2, 2, MLX5_MODI_OUT_UDP_DPORT},
260         {0, 0, 0},
261 };
262
263 struct field_modify_info modify_tcp[] = {
264         {2, 0, MLX5_MODI_OUT_TCP_SPORT},
265         {2, 2, MLX5_MODI_OUT_TCP_DPORT},
266         {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
267         {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
268         {0, 0, 0},
269 };
270
271 static const struct rte_flow_item *
272 mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
273 {
274         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
275                 switch (item->type) {
276                 default:
277                         break;
278                 case RTE_FLOW_ITEM_TYPE_VXLAN:
279                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
280                 case RTE_FLOW_ITEM_TYPE_GRE:
281                 case RTE_FLOW_ITEM_TYPE_MPLS:
282                 case RTE_FLOW_ITEM_TYPE_NVGRE:
283                 case RTE_FLOW_ITEM_TYPE_GENEVE:
284                         return item;
285                 case RTE_FLOW_ITEM_TYPE_IPV4:
286                 case RTE_FLOW_ITEM_TYPE_IPV6:
287                         if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
288                             item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
289                                 return item;
290                         break;
291                 }
292         }
293         return NULL;
294 }
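/*
 * Examples (illustrative): for eth / ipv4 / udp / vxlan the routine
 * returns the VXLAN item. For the IP-in-IP pattern eth / ipv4 / ipv4
 * the two consecutive IP items identify a tunnel and the outer IPv4
 * item is returned. A plain eth / ipv4 / tcp pattern has no tunnel
 * item, so NULL is returned.
 */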
295
296 static void
297 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
298                           uint8_t next_protocol, uint64_t *item_flags,
299                           int *tunnel)
300 {
301         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
302                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
303         if (next_protocol == IPPROTO_IPIP) {
304                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
305                 *tunnel = 1;
306         }
307         if (next_protocol == IPPROTO_IPV6) {
308                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
309                 *tunnel = 1;
310         }
311 }
312
313 /* Update VLAN's VID/PCP based on input rte_flow_action.
314  *
315  * @param[in] action
316  *   Pointer to struct rte_flow_action.
317  * @param[out] vlan
318  *   Pointer to struct rte_vlan_hdr.
319  */
320 static void
321 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
322                          struct rte_vlan_hdr *vlan)
323 {
324         uint16_t vlan_tci;
325         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
326                 vlan_tci =
327                     ((const struct rte_flow_action_of_set_vlan_pcp *)
328                                                action->conf)->vlan_pcp;
329                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
330                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
331                 vlan->vlan_tci |= vlan_tci;
332         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
333                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
334                 vlan->vlan_tci |= rte_be_to_cpu_16
335                     (((const struct rte_flow_action_of_set_vlan_vid *)
336                                              action->conf)->vlan_vid);
337         }
338 }
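/*
 * Worked example (illustrative): the VLAN TCI packs PCP in bits 15:13,
 * DEI in bit 12 and VID in bits 11:0. Applying OF_SET_VLAN_PCP with
 * vlan_pcp == 5 to a zeroed header gives:
 *
 *     struct rte_vlan_hdr vlan = { .vlan_tci = 0 };
 *     // vlan.vlan_tci = 5 << 13 = 0xA000
 *
 * and a following OF_SET_VLAN_VID with vlan_vid == RTE_BE16(100)
 * fills the low 12 bits, leaving vlan.vlan_tci == 0xA064.
 */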
339
340 /**
341  * Fetch a 1, 2, 3 or 4 byte field from the byte array
342  * and return it as an unsigned integer in host-endian format.
343  *
344  * @param[in] data
345  *   Pointer to data array.
346  * @param[in] size
347  *   Size of field to extract.
348  *
349  * @return
350  *   Converted field in host-endian format.
351  */
352 static inline uint32_t
353 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
354 {
355         uint32_t ret;
356
357         switch (size) {
358         case 1:
359                 ret = *data;
360                 break;
361         case 2:
362                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
363                 break;
364         case 3:
365                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
366                 ret = (ret << 8) | *(data + sizeof(uint16_t));
367                 break;
368         case 4:
369                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
370                 break;
371         default:
372                 MLX5_ASSERT(false);
373                 ret = 0;
374                 break;
375         }
376         return ret;
377 }
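/*
 * Worked example (illustrative): fetching a 3-byte field, such as a
 * VXLAN VNI, from big-endian wire data:
 *
 *     const uint8_t data[3] = { 0x12, 0x34, 0x56 };
 *     uint32_t v = flow_dv_fetch_field(data, 3);
 *     // v == 0x123456: the first two bytes are read as a big-endian
 *     // 16-bit value (0x1234), shifted left by 8 and ORed with 0x56.
 */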
378
379 /**
380  * Convert modify-header action to DV specification.
381  *
382  * The data length of each action is determined by the provided field
383  * description and the item mask. The data bit offset and width of each
384  * action are determined by the provided item mask.
385  *
386  * @param[in] item
387  *   Pointer to item specification.
388  * @param[in] field
389  *   Pointer to field modification information.
390  *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
391  *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
392  *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
393  * @param[in] dcopy
394  *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
395  *   A negative offset value sets the same offset as the source offset.
396  *   The size field is ignored; the value is taken from the source field.
397  * @param[in,out] resource
398  *   Pointer to the modify-header resource.
399  * @param[in] type
400  *   Type of modification.
401  * @param[out] error
402  *   Pointer to the error structure.
403  *
404  * @return
405  *   0 on success, a negative errno value otherwise and rte_errno is set.
406  */
407 static int
408 flow_dv_convert_modify_action(struct rte_flow_item *item,
409                               struct field_modify_info *field,
410                               struct field_modify_info *dcopy,
411                               struct mlx5_flow_dv_modify_hdr_resource *resource,
412                               uint32_t type, struct rte_flow_error *error)
413 {
414         uint32_t i = resource->actions_num;
415         struct mlx5_modification_cmd *actions = resource->actions;
416
417         /*
418          * The item and mask are provided in big-endian format.
419          * The fields should be presented in big-endian format as well.
420          * The mask must always be present; it defines the actual field width.
421          */
422         MLX5_ASSERT(item->mask);
423         MLX5_ASSERT(field->size);
424         do {
425                 unsigned int size_b;
426                 unsigned int off_b;
427                 uint32_t mask;
428                 uint32_t data;
429                 bool next_field = true;
430                 bool next_dcopy = true;
431
432                 if (i >= MLX5_MAX_MODIFY_NUM)
433                         return rte_flow_error_set(error, EINVAL,
434                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
435                                  "too many items to modify");
436                 /* Fetch variable byte size mask from the array. */
437                 mask = flow_dv_fetch_field((const uint8_t *)item->mask +
438                                            field->offset, field->size);
439                 if (!mask) {
440                         ++field;
441                         continue;
442                 }
443                 /* Deduce actual data width in bits from mask value. */
444                 off_b = rte_bsf32(mask);
445                 size_b = sizeof(uint32_t) * CHAR_BIT -
446                          off_b - __builtin_clz(mask);
447                 MLX5_ASSERT(size_b);
448                 actions[i] = (struct mlx5_modification_cmd) {
449                         .action_type = type,
450                         .field = field->id,
451                         .offset = off_b,
452                         .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
453                                 0 : size_b,
454                 };
455                 if (type == MLX5_MODIFICATION_TYPE_COPY) {
456                         MLX5_ASSERT(dcopy);
457                         actions[i].dst_field = dcopy->id;
458                         actions[i].dst_offset =
459                                 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
460                         /* Convert entire record to big-endian format. */
461                         actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
462                         /*
463                          * Destination field overflow. Copy leftovers of
464                          * a source field to the next destination field.
465                          */
466                         if ((size_b > dcopy->size * CHAR_BIT) && dcopy->size) {
467                                 actions[i].length = dcopy->size * CHAR_BIT;
468                                 field->offset += dcopy->size;
469                                 next_field = false;
470                         }
471                         /*
472                  * Not enough bits in a source field to fill a
473                          * destination field. Switch to the next source.
474                          */
475                         if (dcopy->size > field->size &&
476                             (size_b == field->size * CHAR_BIT)) {
477                                 actions[i].length = field->size * CHAR_BIT;
478                                 dcopy->offset += field->size * CHAR_BIT;
479                                 next_dcopy = false;
480                         }
481                         if (next_dcopy)
482                                 ++dcopy;
483                 } else {
484                         MLX5_ASSERT(item->spec);
485                         data = flow_dv_fetch_field((const uint8_t *)item->spec +
486                                                    field->offset, field->size);
487                         /* Shift out the trailing masked bits from data. */
488                         data = (data & mask) >> off_b;
489                         actions[i].data1 = rte_cpu_to_be_32(data);
490                 }
491                 /* Convert entire record to expected big-endian format. */
492                 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
493                 if (next_field)
494                         ++field;
495                 ++i;
496         } while (field->size);
497         if (resource->actions_num == i)
498                 return rte_flow_error_set(error, EINVAL,
499                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
500                                           "invalid modification flow item");
501         resource->actions_num = i;
502         return 0;
503 }
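/*
 * Worked example (illustrative): for a SET action whose mask fetches
 * as 0x00ffff00, the loop above computes:
 *
 *     off_b  = rte_bsf32(0x00ffff00);               // 8
 *     size_b = 32 - 8 - __builtin_clz(0x00ffff00);  // 32 - 8 - 8 = 16
 *     // actions[i].offset = 8, actions[i].length = 16,
 *     // data1 = (spec & 0x00ffff00) >> 8, converted to big-endian.
 *
 * A full 32-bit mask (0xffffffff) gives size_b == 32, which is encoded
 * as length == 0 per the device format.
 */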
504
505 /**
506  * Convert modify-header set IPv4 address action to DV specification.
507  *
508  * @param[in,out] resource
509  *   Pointer to the modify-header resource.
510  * @param[in] action
511  *   Pointer to action specification.
512  * @param[out] error
513  *   Pointer to the error structure.
514  *
515  * @return
516  *   0 on success, a negative errno value otherwise and rte_errno is set.
517  */
518 static int
519 flow_dv_convert_action_modify_ipv4
520                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
521                          const struct rte_flow_action *action,
522                          struct rte_flow_error *error)
523 {
524         const struct rte_flow_action_set_ipv4 *conf =
525                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
526         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
527         struct rte_flow_item_ipv4 ipv4;
528         struct rte_flow_item_ipv4 ipv4_mask;
529
530         memset(&ipv4, 0, sizeof(ipv4));
531         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
532         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
533                 ipv4.hdr.src_addr = conf->ipv4_addr;
534                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
535         } else {
536                 ipv4.hdr.dst_addr = conf->ipv4_addr;
537                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
538         }
539         item.spec = &ipv4;
540         item.mask = &ipv4_mask;
541         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
542                                              MLX5_MODIFICATION_TYPE_SET, error);
543 }
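/*
 * Usage sketch (illustrative; the address below is a placeholder): an
 * application-level action list such as the following reaches this
 * conversion when the flow is created on the DV engine:
 *
 *     struct rte_flow_action_set_ipv4 set_src = {
 *             .ipv4_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *               .conf = &set_src },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */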
544
545 /**
546  * Convert modify-header set IPv6 address action to DV specification.
547  *
548  * @param[in,out] resource
549  *   Pointer to the modify-header resource.
550  * @param[in] action
551  *   Pointer to action specification.
552  * @param[out] error
553  *   Pointer to the error structure.
554  *
555  * @return
556  *   0 on success, a negative errno value otherwise and rte_errno is set.
557  */
558 static int
559 flow_dv_convert_action_modify_ipv6
560                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
561                          const struct rte_flow_action *action,
562                          struct rte_flow_error *error)
563 {
564         const struct rte_flow_action_set_ipv6 *conf =
565                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
566         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
567         struct rte_flow_item_ipv6 ipv6;
568         struct rte_flow_item_ipv6 ipv6_mask;
569
570         memset(&ipv6, 0, sizeof(ipv6));
571         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
572         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
573                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
574                        sizeof(ipv6.hdr.src_addr));
575                 memcpy(&ipv6_mask.hdr.src_addr,
576                        &rte_flow_item_ipv6_mask.hdr.src_addr,
577                        sizeof(ipv6.hdr.src_addr));
578         } else {
579                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
580                        sizeof(ipv6.hdr.dst_addr));
581                 memcpy(&ipv6_mask.hdr.dst_addr,
582                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
583                        sizeof(ipv6.hdr.dst_addr));
584         }
585         item.spec = &ipv6;
586         item.mask = &ipv6_mask;
587         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
588                                              MLX5_MODIFICATION_TYPE_SET, error);
589 }
590
591 /**
592  * Convert modify-header set MAC address action to DV specification.
593  *
594  * @param[in,out] resource
595  *   Pointer to the modify-header resource.
596  * @param[in] action
597  *   Pointer to action specification.
598  * @param[out] error
599  *   Pointer to the error structure.
600  *
601  * @return
602  *   0 on success, a negative errno value otherwise and rte_errno is set.
603  */
604 static int
605 flow_dv_convert_action_modify_mac
606                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
607                          const struct rte_flow_action *action,
608                          struct rte_flow_error *error)
609 {
610         const struct rte_flow_action_set_mac *conf =
611                 (const struct rte_flow_action_set_mac *)(action->conf);
612         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
613         struct rte_flow_item_eth eth;
614         struct rte_flow_item_eth eth_mask;
615
616         memset(&eth, 0, sizeof(eth));
617         memset(&eth_mask, 0, sizeof(eth_mask));
618         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
619                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
620                        sizeof(eth.src.addr_bytes));
621                 memcpy(&eth_mask.src.addr_bytes,
622                        &rte_flow_item_eth_mask.src.addr_bytes,
623                        sizeof(eth_mask.src.addr_bytes));
624         } else {
625                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
626                        sizeof(eth.dst.addr_bytes));
627                 memcpy(&eth_mask.dst.addr_bytes,
628                        &rte_flow_item_eth_mask.dst.addr_bytes,
629                        sizeof(eth_mask.dst.addr_bytes));
630         }
631         item.spec = &eth;
632         item.mask = &eth_mask;
633         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
634                                              MLX5_MODIFICATION_TYPE_SET, error);
635 }
636
637 /**
638  * Convert modify-header set VLAN VID action to DV specification.
639  *
640  * @param[in,out] resource
641  *   Pointer to the modify-header resource.
642  * @param[in] action
643  *   Pointer to action specification.
644  * @param[out] error
645  *   Pointer to the error structure.
646  *
647  * @return
648  *   0 on success, a negative errno value otherwise and rte_errno is set.
649  */
650 static int
651 flow_dv_convert_action_modify_vlan_vid
652                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
653                          const struct rte_flow_action *action,
654                          struct rte_flow_error *error)
655 {
656         const struct rte_flow_action_of_set_vlan_vid *conf =
657                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
658         int i = resource->actions_num;
659         struct mlx5_modification_cmd *actions = resource->actions;
660         struct field_modify_info *field = modify_vlan_out_first_vid;
661
662         if (i >= MLX5_MAX_MODIFY_NUM)
663                 return rte_flow_error_set(error, EINVAL,
664                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
665                          "too many items to modify");
666         actions[i] = (struct mlx5_modification_cmd) {
667                 .action_type = MLX5_MODIFICATION_TYPE_SET,
668                 .field = field->id,
669                 .length = field->size,
670                 .offset = field->offset,
671         };
672         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
673         actions[i].data1 = conf->vlan_vid;
674         actions[i].data1 = actions[i].data1 << 16;
675         resource->actions_num = ++i;
676         return 0;
677 }
678
679 /**
680  * Convert modify-header set TP action to DV specification.
681  *
682  * @param[in,out] resource
683  *   Pointer to the modify-header resource.
684  * @param[in] action
685  *   Pointer to action specification.
686  * @param[in] items
687  *   Pointer to rte_flow_item objects list.
688  * @param[in] attr
689  *   Pointer to flow attributes structure.
690  * @param[in] dev_flow
691  *   Pointer to the sub flow.
692  * @param[in] tunnel_decap
693  *   Whether action is after tunnel decapsulation.
694  * @param[out] error
695  *   Pointer to the error structure.
696  *
697  * @return
698  *   0 on success, a negative errno value otherwise and rte_errno is set.
699  */
700 static int
701 flow_dv_convert_action_modify_tp
702                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
703                          const struct rte_flow_action *action,
704                          const struct rte_flow_item *items,
705                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
706                          bool tunnel_decap, struct rte_flow_error *error)
707 {
708         const struct rte_flow_action_set_tp *conf =
709                 (const struct rte_flow_action_set_tp *)(action->conf);
710         struct rte_flow_item item;
711         struct rte_flow_item_udp udp;
712         struct rte_flow_item_udp udp_mask;
713         struct rte_flow_item_tcp tcp;
714         struct rte_flow_item_tcp tcp_mask;
715         struct field_modify_info *field;
716
717         if (!attr->valid)
718                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
719         if (attr->udp) {
720                 memset(&udp, 0, sizeof(udp));
721                 memset(&udp_mask, 0, sizeof(udp_mask));
722                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
723                         udp.hdr.src_port = conf->port;
724                         udp_mask.hdr.src_port =
725                                         rte_flow_item_udp_mask.hdr.src_port;
726                 } else {
727                         udp.hdr.dst_port = conf->port;
728                         udp_mask.hdr.dst_port =
729                                         rte_flow_item_udp_mask.hdr.dst_port;
730                 }
731                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
732                 item.spec = &udp;
733                 item.mask = &udp_mask;
734                 field = modify_udp;
735         } else {
736                 MLX5_ASSERT(attr->tcp);
737                 memset(&tcp, 0, sizeof(tcp));
738                 memset(&tcp_mask, 0, sizeof(tcp_mask));
739                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
740                         tcp.hdr.src_port = conf->port;
741                         tcp_mask.hdr.src_port =
742                                         rte_flow_item_tcp_mask.hdr.src_port;
743                 } else {
744                         tcp.hdr.dst_port = conf->port;
745                         tcp_mask.hdr.dst_port =
746                                         rte_flow_item_tcp_mask.hdr.dst_port;
747                 }
748                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
749                 item.spec = &tcp;
750                 item.mask = &tcp_mask;
751                 field = modify_tcp;
752         }
753         return flow_dv_convert_modify_action(&item, field, NULL, resource,
754                                              MLX5_MODIFICATION_TYPE_SET, error);
755 }
756
757 /**
758  * Convert modify-header set TTL action to DV specification.
759  *
760  * @param[in,out] resource
761  *   Pointer to the modify-header resource.
762  * @param[in] action
763  *   Pointer to action specification.
764  * @param[in] items
765  *   Pointer to rte_flow_item objects list.
766  * @param[in] attr
767  *   Pointer to flow attributes structure.
768  * @param[in] dev_flow
769  *   Pointer to the sub flow.
770  * @param[in] tunnel_decap
771  *   Whether action is after tunnel decapsulation.
772  * @param[out] error
773  *   Pointer to the error structure.
774  *
775  * @return
776  *   0 on success, a negative errno value otherwise and rte_errno is set.
777  */
778 static int
779 flow_dv_convert_action_modify_ttl
780                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
781                          const struct rte_flow_action *action,
782                          const struct rte_flow_item *items,
783                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
784                          bool tunnel_decap, struct rte_flow_error *error)
785 {
786         const struct rte_flow_action_set_ttl *conf =
787                 (const struct rte_flow_action_set_ttl *)(action->conf);
788         struct rte_flow_item item;
789         struct rte_flow_item_ipv4 ipv4;
790         struct rte_flow_item_ipv4 ipv4_mask;
791         struct rte_flow_item_ipv6 ipv6;
792         struct rte_flow_item_ipv6 ipv6_mask;
793         struct field_modify_info *field;
794
795         if (!attr->valid)
796                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
797         if (attr->ipv4) {
798                 memset(&ipv4, 0, sizeof(ipv4));
799                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
800                 ipv4.hdr.time_to_live = conf->ttl_value;
801                 ipv4_mask.hdr.time_to_live = 0xFF;
802                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
803                 item.spec = &ipv4;
804                 item.mask = &ipv4_mask;
805                 field = modify_ipv4;
806         } else {
807                 MLX5_ASSERT(attr->ipv6);
808                 memset(&ipv6, 0, sizeof(ipv6));
809                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
810                 ipv6.hdr.hop_limits = conf->ttl_value;
811                 ipv6_mask.hdr.hop_limits = 0xFF;
812                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
813                 item.spec = &ipv6;
814                 item.mask = &ipv6_mask;
815                 field = modify_ipv6;
816         }
817         return flow_dv_convert_modify_action(&item, field, NULL, resource,
818                                              MLX5_MODIFICATION_TYPE_SET, error);
819 }
820
821 /**
822  * Convert modify-header decrement TTL action to DV specification.
823  *
824  * @param[in,out] resource
825  *   Pointer to the modify-header resource.
828  * @param[in] items
829  *   Pointer to rte_flow_item objects list.
830  * @param[in] attr
831  *   Pointer to flow attributes structure.
832  * @param[in] dev_flow
833  *   Pointer to the sub flow.
834  * @param[in] tunnel_decap
835  *   Whether action is after tunnel decapsulation.
836  * @param[out] error
837  *   Pointer to the error structure.
838  *
839  * @return
840  *   0 on success, a negative errno value otherwise and rte_errno is set.
841  */
842 static int
843 flow_dv_convert_action_modify_dec_ttl
844                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
845                          const struct rte_flow_item *items,
846                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
847                          bool tunnel_decap, struct rte_flow_error *error)
848 {
849         struct rte_flow_item item;
850         struct rte_flow_item_ipv4 ipv4;
851         struct rte_flow_item_ipv4 ipv4_mask;
852         struct rte_flow_item_ipv6 ipv6;
853         struct rte_flow_item_ipv6 ipv6_mask;
854         struct field_modify_info *field;
855
856         if (!attr->valid)
857                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
858         if (attr->ipv4) {
859                 memset(&ipv4, 0, sizeof(ipv4));
860                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
861                 ipv4.hdr.time_to_live = 0xFF;
862                 ipv4_mask.hdr.time_to_live = 0xFF;
863                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
864                 item.spec = &ipv4;
865                 item.mask = &ipv4_mask;
866                 field = modify_ipv4;
867         } else {
868                 MLX5_ASSERT(attr->ipv6);
869                 memset(&ipv6, 0, sizeof(ipv6));
870                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
871                 ipv6.hdr.hop_limits = 0xFF;
872                 ipv6_mask.hdr.hop_limits = 0xFF;
873                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
874                 item.spec = &ipv6;
875                 item.mask = &ipv6_mask;
876                 field = modify_ipv6;
877         }
878         return flow_dv_convert_modify_action(&item, field, NULL, resource,
879                                              MLX5_MODIFICATION_TYPE_ADD, error);
880 }
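/*
 * Arithmetic note (illustrative): the decrement is implemented as
 * MLX5_MODIFICATION_TYPE_ADD of 0xFF to the 8-bit TTL/hop-limit field.
 * The addition wraps modulo 256:
 *
 *     // TTL' = (TTL + 0xFF) % 256 == TTL - 1   (for TTL > 0)
 *
 * the same trick as the TCP sequence/acknowledgment decrement below,
 * applied to an 8-bit field.
 */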
881
882 /**
883  * Convert modify-header increment/decrement TCP Sequence number
884  * to DV specification.
885  *
886  * @param[in,out] resource
887  *   Pointer to the modify-header resource.
888  * @param[in] action
889  *   Pointer to action specification.
890  * @param[out] error
891  *   Pointer to the error structure.
892  *
893  * @return
894  *   0 on success, a negative errno value otherwise and rte_errno is set.
895  */
896 static int
897 flow_dv_convert_action_modify_tcp_seq
898                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
899                          const struct rte_flow_action *action,
900                          struct rte_flow_error *error)
901 {
902         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
903         uint64_t value = rte_be_to_cpu_32(*conf);
904         struct rte_flow_item item;
905         struct rte_flow_item_tcp tcp;
906         struct rte_flow_item_tcp tcp_mask;
907
908         memset(&tcp, 0, sizeof(tcp));
909         memset(&tcp_mask, 0, sizeof(tcp_mask));
910         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
911                 /*
912                  * The HW has no decrement operation, only increment operation.
913                  * To simulate decrementing Y by X using the increment
914                  * operation we need to add UINT32_MAX X times to Y.
915                  * Each addition of UINT32_MAX decrements Y by 1.
916                  */
917                 value *= UINT32_MAX;
918         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
919         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
920         item.type = RTE_FLOW_ITEM_TYPE_TCP;
921         item.spec = &tcp;
922         item.mask = &tcp_mask;
923         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
924                                              MLX5_MODIFICATION_TYPE_ADD, error);
925 }
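/*
 * Worked example (illustrative): decrementing the sequence number by 3
 * multiplies 3 by UINT32_MAX and truncates to 32 bits:
 *
 *     uint64_t value = 3 * (uint64_t)UINT32_MAX;  // 0x2fffffffd
 *     uint32_t add   = (uint32_t)value;           // 0xfffffffd
 *     // (seq + 0xfffffffd) % 2^32 == seq - 3
 */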
926
927 /**
928  * Convert modify-header increment/decrement TCP Acknowledgment number
929  * to DV specification.
930  *
931  * @param[in,out] resource
932  *   Pointer to the modify-header resource.
933  * @param[in] action
934  *   Pointer to action specification.
935  * @param[out] error
936  *   Pointer to the error structure.
937  *
938  * @return
939  *   0 on success, a negative errno value otherwise and rte_errno is set.
940  */
941 static int
942 flow_dv_convert_action_modify_tcp_ack
943                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
944                          const struct rte_flow_action *action,
945                          struct rte_flow_error *error)
946 {
947         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
948         uint64_t value = rte_be_to_cpu_32(*conf);
949         struct rte_flow_item item;
950         struct rte_flow_item_tcp tcp;
951         struct rte_flow_item_tcp tcp_mask;
952
953         memset(&tcp, 0, sizeof(tcp));
954         memset(&tcp_mask, 0, sizeof(tcp_mask));
955         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
956                 /*
957                  * The HW has no decrement operation, only increment operation.
958                  * To simulate decrementing Y by X using the increment
959                  * operation we need to add UINT32_MAX X times to Y.
960                  * Each addition of UINT32_MAX decrements Y by 1.
961                  */
962                 value *= UINT32_MAX;
963         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
964         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
965         item.type = RTE_FLOW_ITEM_TYPE_TCP;
966         item.spec = &tcp;
967         item.mask = &tcp_mask;
968         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
969                                              MLX5_MODIFICATION_TYPE_ADD, error);
970 }
971
972 static enum mlx5_modification_field reg_to_field[] = {
973         [REG_NON] = MLX5_MODI_OUT_NONE,
974         [REG_A] = MLX5_MODI_META_DATA_REG_A,
975         [REG_B] = MLX5_MODI_META_DATA_REG_B,
976         [REG_C_0] = MLX5_MODI_META_REG_C_0,
977         [REG_C_1] = MLX5_MODI_META_REG_C_1,
978         [REG_C_2] = MLX5_MODI_META_REG_C_2,
979         [REG_C_3] = MLX5_MODI_META_REG_C_3,
980         [REG_C_4] = MLX5_MODI_META_REG_C_4,
981         [REG_C_5] = MLX5_MODI_META_REG_C_5,
982         [REG_C_6] = MLX5_MODI_META_REG_C_6,
983         [REG_C_7] = MLX5_MODI_META_REG_C_7,
984 };
985
986 /**
987  * Convert register set to DV specification.
988  *
989  * @param[in,out] resource
990  *   Pointer to the modify-header resource.
991  * @param[in] action
992  *   Pointer to action specification.
993  * @param[out] error
994  *   Pointer to the error structure.
995  *
996  * @return
997  *   0 on success, a negative errno value otherwise and rte_errno is set.
998  */
999 static int
1000 flow_dv_convert_action_set_reg
1001                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1002                          const struct rte_flow_action *action,
1003                          struct rte_flow_error *error)
1004 {
1005         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
1006         struct mlx5_modification_cmd *actions = resource->actions;
1007         uint32_t i = resource->actions_num;
1008
1009         if (i >= MLX5_MAX_MODIFY_NUM)
1010                 return rte_flow_error_set(error, EINVAL,
1011                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1012                                           "too many items to modify");
1013         MLX5_ASSERT(conf->id != REG_NON);
1014         MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
1015         actions[i] = (struct mlx5_modification_cmd) {
1016                 .action_type = MLX5_MODIFICATION_TYPE_SET,
1017                 .field = reg_to_field[conf->id],
1018                 .offset = conf->offset,
1019                 .length = conf->length,
1020         };
1021         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1022         actions[i].data1 = rte_cpu_to_be_32(conf->data);
1023         ++i;
1024         resource->actions_num = i;
1025         return 0;
1026 }
1027
1028 /**
1029  * Convert SET_TAG action to DV specification.
1030  *
1031  * @param[in] dev
1032  *   Pointer to the rte_eth_dev structure.
1033  * @param[in,out] resource
1034  *   Pointer to the modify-header resource.
1035  * @param[in] conf
1036  *   Pointer to action specification.
1037  * @param[out] error
1038  *   Pointer to the error structure.
1039  *
1040  * @return
1041  *   0 on success, a negative errno value otherwise and rte_errno is set.
1042  */
1043 static int
1044 flow_dv_convert_action_set_tag
1045                         (struct rte_eth_dev *dev,
1046                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1047                          const struct rte_flow_action_set_tag *conf,
1048                          struct rte_flow_error *error)
1049 {
1050         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1051         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1052         struct rte_flow_item item = {
1053                 .spec = &data,
1054                 .mask = &mask,
1055         };
1056         struct field_modify_info reg_c_x[] = {
1057                 [1] = {0, 0, 0},
1058         };
1059         enum mlx5_modification_field reg_type;
1060         int ret;
1061
1062         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1063         if (ret < 0)
1064                 return ret;
1065         MLX5_ASSERT(ret != REG_NON);
1066         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1067         reg_type = reg_to_field[ret];
1068         MLX5_ASSERT(reg_type > 0);
1069         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1070         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1071                                              MLX5_MODIFICATION_TYPE_SET, error);
1072 }
1073
1074 /**
1075  * Convert internal COPY_REG action to DV specification.
1076  *
1077  * @param[in] dev
1078  *   Pointer to the rte_eth_dev structure.
1079  * @param[in,out] res
1080  *   Pointer to the modify-header resource.
1081  * @param[in] action
1082  *   Pointer to action specification.
1083  * @param[out] error
1084  *   Pointer to the error structure.
1085  *
1086  * @return
1087  *   0 on success, a negative errno value otherwise and rte_errno is set.
1088  */
1089 static int
1090 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
1091                                  struct mlx5_flow_dv_modify_hdr_resource *res,
1092                                  const struct rte_flow_action *action,
1093                                  struct rte_flow_error *error)
1094 {
1095         const struct mlx5_flow_action_copy_mreg *conf = action->conf;
1096         rte_be32_t mask = RTE_BE32(UINT32_MAX);
1097         struct rte_flow_item item = {
1098                 .spec = NULL,
1099                 .mask = &mask,
1100         };
1101         struct field_modify_info reg_src[] = {
1102                 {4, 0, reg_to_field[conf->src]},
1103                 {0, 0, 0},
1104         };
1105         struct field_modify_info reg_dst = {
1106                 .offset = 0,
1107                 .id = reg_to_field[conf->dst],
1108         };
1109         /* Adjust reg_c[0] usage according to reported mask. */
1110         if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1111                 struct mlx5_priv *priv = dev->data->dev_private;
1112                 uint32_t reg_c0 = priv->sh->dv_regc0_mask;
1113
1114                 MLX5_ASSERT(reg_c0);
1115                 MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1116                 if (conf->dst == REG_C_0) {
1117                         /* Copy to reg_c[0], within mask only. */
1118                         reg_dst.offset = rte_bsf32(reg_c0);
1119                         /*
1120                         * The mask ignores the endianness, because
1121                         * there is no conversion in the datapath.
1122                          */
1123 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1124                         /* Copy from destination lower bits to reg_c[0]. */
1125                         mask = reg_c0 >> reg_dst.offset;
1126 #else
1127                         /* Copy from destination upper bits to reg_c[0]. */
1128                         mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1129                                           rte_fls_u32(reg_c0));
1130 #endif
1131                 } else {
1132                         mask = rte_cpu_to_be_32(reg_c0);
1133 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1134                         /* Copy from reg_c[0] to destination lower bits. */
1135                         reg_dst.offset = 0;
1136 #else
1137                         /* Copy from reg_c[0] to destination upper bits. */
1138                         reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1139                                          (rte_fls_u32(reg_c0) -
1140                                           rte_bsf32(reg_c0));
1141 #endif
1142                 }
1143         }
1144         return flow_dv_convert_modify_action(&item,
1145                                              reg_src, &reg_dst, res,
1146                                              MLX5_MODIFICATION_TYPE_COPY,
1147                                              error);
1148 }
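/*
 * Worked example (illustrative, assuming dv_regc0_mask == 0xffff0000):
 * only the upper half of reg_c[0] is writable, so for a copy into
 * REG_C_0 the code above derives:
 *
 *     reg_dst.offset = rte_bsf32(0xffff0000);  // 16
 *     // the 16-bit source value lands in bits 31:16 of reg_c[0]
 *     // while the mask keeps the reserved lower bits untouched.
 */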
1149
1150 /**
1151  * Convert MARK action to DV specification. This routine is used
1152  * in extensive metadata mode only and requires the metadata register
1153  * to be handled. In legacy mode the hardware tag resource is engaged.
1154  *
1155  * @param[in] dev
1156  *   Pointer to the rte_eth_dev structure.
1157  * @param[in] conf
1158  *   Pointer to MARK action specification.
1159  * @param[in,out] resource
1160  *   Pointer to the modify-header resource.
1161  * @param[out] error
1162  *   Pointer to the error structure.
1163  *
1164  * @return
1165  *   0 on success, a negative errno value otherwise and rte_errno is set.
1166  */
1167 static int
1168 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1169                             const struct rte_flow_action_mark *conf,
1170                             struct mlx5_flow_dv_modify_hdr_resource *resource,
1171                             struct rte_flow_error *error)
1172 {
1173         struct mlx5_priv *priv = dev->data->dev_private;
1174         rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1175                                            priv->sh->dv_mark_mask);
1176         rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1177         struct rte_flow_item item = {
1178                 .spec = &data,
1179                 .mask = &mask,
1180         };
1181         struct field_modify_info reg_c_x[] = {
1182                 [1] = {0, 0, 0},
1183         };
1184         int reg;
1185
1186         if (!mask)
1187                 return rte_flow_error_set(error, EINVAL,
1188                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1189                                           NULL, "zero mark action mask");
1190         reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1191         if (reg < 0)
1192                 return reg;
1193         MLX5_ASSERT(reg > 0);
1194         if (reg == REG_C_0) {
1195                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1196                 uint32_t shl_c0 = rte_bsf32(msk_c0);
1197
1198                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1199                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1200                 mask = rte_cpu_to_be_32(mask << shl_c0);
1201         }
1202         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1203         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1204                                              MLX5_MODIFICATION_TYPE_SET, error);
1205 }
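/*
 * Worked example (illustrative, assuming dv_regc0_mask == 0xffff0000):
 * when the MARK id is stored in reg_c[0], shl_c0 == 16 and the id is
 * shifted into the writable upper half of the register before being
 * written, e.g. mark id 0x1234 ends up in bits 31:16 of reg_c[0].
 */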
1206
1207 /**
1208  * Get metadata register index for specified steering domain.
1209  *
1210  * @param[in] dev
1211  *   Pointer to the rte_eth_dev structure.
1212  * @param[in] attr
1213  *   Attributes of flow to determine steering domain.
1214  * @param[out] error
1215  *   Pointer to the error structure.
1216  *
1217  * @return
1218  *   positive index on success, a negative errno value otherwise
1219  *   and rte_errno is set.
1220  */
1221 static enum modify_reg
1222 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1223                          const struct rte_flow_attr *attr,
1224                          struct rte_flow_error *error)
1225 {
1226         int reg =
1227                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1228                                           MLX5_METADATA_FDB :
1229                                             attr->egress ?
1230                                             MLX5_METADATA_TX :
1231                                             MLX5_METADATA_RX, 0, error);
1232         if (reg < 0)
1233                 return rte_flow_error_set(error,
1234                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1235                                           NULL, "unavailable "
1236                                           "metadata register");
1237         return reg;
1238 }
1239
1240 /**
1241  * Convert SET_META action to DV specification.
1242  *
1243  * @param[in] dev
1244  *   Pointer to the rte_eth_dev structure.
1245  * @param[in,out] resource
1246  *   Pointer to the modify-header resource.
1247  * @param[in] attr
1248  *   Attributes of flow that includes this item.
1249  * @param[in] conf
1250  *   Pointer to action specification.
1251  * @param[out] error
1252  *   Pointer to the error structure.
1253  *
1254  * @return
1255  *   0 on success, a negative errno value otherwise and rte_errno is set.
1256  */
1257 static int
1258 flow_dv_convert_action_set_meta
1259                         (struct rte_eth_dev *dev,
1260                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1261                          const struct rte_flow_attr *attr,
1262                          const struct rte_flow_action_set_meta *conf,
1263                          struct rte_flow_error *error)
1264 {
1265         uint32_t mask = rte_cpu_to_be_32(conf->mask);
1266         uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
1267         struct rte_flow_item item = {
1268                 .spec = &data,
1269                 .mask = &mask,
1270         };
1271         struct field_modify_info reg_c_x[] = {
1272                 [1] = {0, 0, 0},
1273         };
1274         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1275
1276         if (reg < 0)
1277                 return reg;
1278         MLX5_ASSERT(reg != REG_NON);
1279         if (reg == REG_C_0) {
1280                 struct mlx5_priv *priv = dev->data->dev_private;
1281                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1282                 uint32_t shl_c0 = rte_bsf32(msk_c0);
1283
1284                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1285                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1286                 mask = rte_cpu_to_be_32(mask << shl_c0);
1287         }
1288         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1289         /* The routine expects the parameters in memory in big-endian format. */
1290         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1291                                              MLX5_MODIFICATION_TYPE_SET, error);
1292 }
1293
1294 /**
1295  * Convert modify-header set IPv4 DSCP action to DV specification.
1296  *
1297  * @param[in,out] resource
1298  *   Pointer to the modify-header resource.
1299  * @param[in] action
1300  *   Pointer to action specification.
1301  * @param[out] error
1302  *   Pointer to the error structure.
1303  *
1304  * @return
1305  *   0 on success, a negative errno value otherwise and rte_errno is set.
1306  */
1307 static int
1308 flow_dv_convert_action_modify_ipv4_dscp
1309                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1310                          const struct rte_flow_action *action,
1311                          struct rte_flow_error *error)
1312 {
1313         const struct rte_flow_action_set_dscp *conf =
1314                 (const struct rte_flow_action_set_dscp *)(action->conf);
1315         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1316         struct rte_flow_item_ipv4 ipv4;
1317         struct rte_flow_item_ipv4 ipv4_mask;
1318
1319         memset(&ipv4, 0, sizeof(ipv4));
1320         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1321         ipv4.hdr.type_of_service = conf->dscp;
1322         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1323         item.spec = &ipv4;
1324         item.mask = &ipv4_mask;
1325         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1326                                              MLX5_MODIFICATION_TYPE_SET, error);
1327 }
1328
1329 /**
1330  * Convert modify-header set IPv6 DSCP action to DV specification.
1331  *
1332  * @param[in,out] resource
1333  *   Pointer to the modify-header resource.
1334  * @param[in] action
1335  *   Pointer to action specification.
1336  * @param[out] error
1337  *   Pointer to the error structure.
1338  *
1339  * @return
1340  *   0 on success, a negative errno value otherwise and rte_errno is set.
1341  */
1342 static int
1343 flow_dv_convert_action_modify_ipv6_dscp
1344                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1345                          const struct rte_flow_action *action,
1346                          struct rte_flow_error *error)
1347 {
1348         const struct rte_flow_action_set_dscp *conf =
1349                 (const struct rte_flow_action_set_dscp *)(action->conf);
1350         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1351         struct rte_flow_item_ipv6 ipv6;
1352         struct rte_flow_item_ipv6 ipv6_mask;
1353
1354         memset(&ipv6, 0, sizeof(ipv6));
1355         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1356         /*
1357          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1358          * rdma-core only accepts the DSCP bits byte-aligned (bits 0 to
1359          * 5), to be compatible with IPv4. No need to shift the bits in
1360          * the IPv6 case as rdma-core requires a byte-aligned value.
1361          */
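        /*
         * RTE_IPV6_HDR_DSCP_MASK is 0x0fc00000 (DSCP sits in bits
         * 27..22 of vtc_flow), so shifting it right by 22 yields the
         * byte-aligned six-bit mask 0x3f used below.
         */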
1362         ipv6.hdr.vtc_flow = conf->dscp;
1363         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1364         item.spec = &ipv6;
1365         item.mask = &ipv6_mask;
1366         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1367                                              MLX5_MODIFICATION_TYPE_SET, error);
1368 }
1369
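/**
 * Return the width of a modify_field field ID, in bits.
 *
 * Zero is returned for META when extended metadata is disabled, since
 * the field is not available in that mode.
 */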
1370 static int
1371 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1372                            enum rte_flow_field_id field)
1373 {
1374         switch (field) {
1375         case RTE_FLOW_FIELD_START:
1376                 return 32;
1377         case RTE_FLOW_FIELD_MAC_DST:
1378         case RTE_FLOW_FIELD_MAC_SRC:
1379                 return 48;
1380         case RTE_FLOW_FIELD_VLAN_TYPE:
1381                 return 16;
1382         case RTE_FLOW_FIELD_VLAN_ID:
1383                 return 12;
1384         case RTE_FLOW_FIELD_MAC_TYPE:
1385                 return 16;
1386         case RTE_FLOW_FIELD_IPV4_DSCP:
1387                 return 6;
1388         case RTE_FLOW_FIELD_IPV4_TTL:
1389                 return 8;
1390         case RTE_FLOW_FIELD_IPV4_SRC:
1391         case RTE_FLOW_FIELD_IPV4_DST:
1392                 return 32;
1393         case RTE_FLOW_FIELD_IPV6_DSCP:
1394                 return 6;
1395         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1396                 return 8;
1397         case RTE_FLOW_FIELD_IPV6_SRC:
1398         case RTE_FLOW_FIELD_IPV6_DST:
1399                 return 128;
1400         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1401         case RTE_FLOW_FIELD_TCP_PORT_DST:
1402                 return 16;
1403         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1404         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1405                 return 32;
1406         case RTE_FLOW_FIELD_TCP_FLAGS:
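                /* 9 bits: FIN..CWR plus the NS bit (RFC 3540). */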
1407                 return 9;
1408         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1409         case RTE_FLOW_FIELD_UDP_PORT_DST:
1410                 return 16;
1411         case RTE_FLOW_FIELD_VXLAN_VNI:
1412         case RTE_FLOW_FIELD_GENEVE_VNI:
1413                 return 24;
1414         case RTE_FLOW_FIELD_GTP_TEID:
1415         case RTE_FLOW_FIELD_TAG:
1416                 return 32;
1417         case RTE_FLOW_FIELD_MARK:
1418                 return 24;
1419         case RTE_FLOW_FIELD_META:
1420                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1421                         return 16;
1422                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1423                         return 32;
1424                 else
1425                         return 0;
1426         case RTE_FLOW_FIELD_POINTER:
1427         case RTE_FLOW_FIELD_VALUE:
1428                 return 64;
1429         default:
1430                 MLX5_ASSERT(false);
1431         }
1432         return 0;
1433 }
1434
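/**
 * Translate a modify_field field ID into hardware modify descriptors.
 *
 * Fills info[] with up to MLX5_ACT_MAX_MOD_FIELDS per-word descriptors
 * for the given field and, when mask is non-NULL, writes the matching
 * big-endian per-word masks covering the requested width. For immediate
 * sources (VALUE/POINTER) the value[] words are filled instead.
 */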
1435 static void
1436 mlx5_flow_field_id_to_modify_info
1437                 (const struct rte_flow_action_modify_data *data,
1438                  struct field_modify_info *info,
1439                  uint32_t *mask, uint32_t *value,
1440                  uint32_t width, uint32_t dst_width,
1441                  struct rte_eth_dev *dev,
1442                  const struct rte_flow_attr *attr,
1443                  struct rte_flow_error *error)
1444 {
1445         struct mlx5_priv *priv = dev->data->dev_private;
1446         struct mlx5_dev_config *config = &priv->config;
1447         uint32_t idx = 0;
1448         uint32_t off = 0;
1449         uint64_t val = 0;
1450         switch (data->field) {
1451         case RTE_FLOW_FIELD_START:
1452                 /* not supported yet */
1453                 MLX5_ASSERT(false);
1454                 break;
1455         case RTE_FLOW_FIELD_MAC_DST:
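                /*
                 * A 48-bit MAC address is programmed as two registers:
                 * the low 16 bits (DMAC_15_0) and the high 32 bits
                 * (DMAC_47_16); offset and width select which of the
                 * two words are emitted and how they are masked.
                 */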
1456                 off = data->offset > 16 ? data->offset - 16 : 0;
1457                 if (mask) {
1458                         if (data->offset < 16) {
1459                                 info[idx] = (struct field_modify_info){2, 0,
1460                                                 MLX5_MODI_OUT_DMAC_15_0};
1461                                 if (width < 16) {
1462                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1463                                                                  (16 - width));
1464                                         width = 0;
1465                                 } else {
1466                                         mask[idx] = RTE_BE16(0xffff);
1467                                         width -= 16;
1468                                 }
1469                                 if (!width)
1470                                         break;
1471                                 ++idx;
1472                         }
1473                         info[idx] = (struct field_modify_info){4, 4 * idx,
1474                                                 MLX5_MODI_OUT_DMAC_47_16};
1475                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1476                                                       (32 - width)) << off);
1477                 } else {
1478                         if (data->offset < 16)
1479                                 info[idx++] = (struct field_modify_info){2, 0,
1480                                                 MLX5_MODI_OUT_DMAC_15_0};
1481                         info[idx] = (struct field_modify_info){4, off,
1482                                                 MLX5_MODI_OUT_DMAC_47_16};
1483                 }
1484                 break;
1485         case RTE_FLOW_FIELD_MAC_SRC:
1486                 off = data->offset > 16 ? data->offset - 16 : 0;
1487                 if (mask) {
1488                         if (data->offset < 16) {
1489                                 info[idx] = (struct field_modify_info){2, 0,
1490                                                 MLX5_MODI_OUT_SMAC_15_0};
1491                                 if (width < 16) {
1492                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1493                                                                  (16 - width));
1494                                         width = 0;
1495                                 } else {
1496                                         mask[idx] = RTE_BE16(0xffff);
1497                                         width -= 16;
1498                                 }
1499                                 if (!width)
1500                                         break;
1501                                 ++idx;
1502                         }
1503                         info[idx] = (struct field_modify_info){4, 4 * idx,
1504                                                 MLX5_MODI_OUT_SMAC_47_16};
1505                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1506                                                       (32 - width)) << off);
1507                 } else {
1508                         if (data->offset < 16)
1509                                 info[idx++] = (struct field_modify_info){2, 0,
1510                                                 MLX5_MODI_OUT_SMAC_15_0};
1511                         info[idx] = (struct field_modify_info){4, off,
1512                                                 MLX5_MODI_OUT_SMAC_47_16};
1513                 }
1514                 break;
1515         case RTE_FLOW_FIELD_VLAN_TYPE:
1516                 /* not supported yet */
1517                 break;
1518         case RTE_FLOW_FIELD_VLAN_ID:
1519                 info[idx] = (struct field_modify_info){2, 0,
1520                                         MLX5_MODI_OUT_FIRST_VID};
1521                 if (mask)
1522                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1523                 break;
1524         case RTE_FLOW_FIELD_MAC_TYPE:
1525                 info[idx] = (struct field_modify_info){2, 0,
1526                                         MLX5_MODI_OUT_ETHERTYPE};
1527                 if (mask)
1528                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1529                 break;
1530         case RTE_FLOW_FIELD_IPV4_DSCP:
1531                 info[idx] = (struct field_modify_info){1, 0,
1532                                         MLX5_MODI_OUT_IP_DSCP};
1533                 if (mask)
1534                         mask[idx] = 0x3f >> (6 - width);
1535                 break;
1536         case RTE_FLOW_FIELD_IPV4_TTL:
1537                 info[idx] = (struct field_modify_info){1, 0,
1538                                         MLX5_MODI_OUT_IPV4_TTL};
1539                 if (mask)
1540                         mask[idx] = 0xff >> (8 - width);
1541                 break;
1542         case RTE_FLOW_FIELD_IPV4_SRC:
1543                 info[idx] = (struct field_modify_info){4, 0,
1544                                         MLX5_MODI_OUT_SIPV4};
1545                 if (mask)
1546                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1547                                                      (32 - width));
1548                 break;
1549         case RTE_FLOW_FIELD_IPV4_DST:
1550                 info[idx] = (struct field_modify_info){4, 0,
1551                                         MLX5_MODI_OUT_DIPV4};
1552                 if (mask)
1553                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1554                                                      (32 - width));
1555                 break;
1556         case RTE_FLOW_FIELD_IPV6_DSCP:
1557                 info[idx] = (struct field_modify_info){1, 0,
1558                                         MLX5_MODI_OUT_IP_DSCP};
1559                 if (mask)
1560                         mask[idx] = 0x3f >> (6 - width);
1561                 break;
1562         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1563                 info[idx] = (struct field_modify_info){1, 0,
1564                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1565                 if (mask)
1566                         mask[idx] = 0xff >> (8 - width);
1567                 break;
1568         case RTE_FLOW_FIELD_IPV6_SRC:
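                /*
                 * A 128-bit IPv6 address spans four 32-bit registers
                 * (SIPV6_31_0 .. SIPV6_127_96); words are emitted from
                 * the offset onward until the requested width is
                 * consumed, the last word getting a partial mask.
                 */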
1569                 if (mask) {
1570                         if (data->offset < 32) {
1571                                 info[idx] = (struct field_modify_info){4,
1572                                                 4 * idx,
1573                                                 MLX5_MODI_OUT_SIPV6_31_0};
1574                                 if (width < 32) {
1575                                         mask[idx] =
1576                                                 rte_cpu_to_be_32(0xffffffff >>
1577                                                                  (32 - width));
1578                                         width = 0;
1579                                 } else {
1580                                         mask[idx] = RTE_BE32(0xffffffff);
1581                                         width -= 32;
1582                                 }
1583                                 if (!width)
1584                                         break;
1585                                 ++idx;
1586                         }
1587                         if (data->offset < 64) {
1588                                 info[idx] = (struct field_modify_info){4,
1589                                                 4 * idx,
1590                                                 MLX5_MODI_OUT_SIPV6_63_32};
1591                                 if (width < 32) {
1592                                         mask[idx] =
1593                                                 rte_cpu_to_be_32(0xffffffff >>
1594                                                                  (32 - width));
1595                                         width = 0;
1596                                 } else {
1597                                         mask[idx] = RTE_BE32(0xffffffff);
1598                                         width -= 32;
1599                                 }
1600                                 if (!width)
1601                                         break;
1602                                 ++idx;
1603                         }
1604                         if (data->offset < 96) {
1605                                 info[idx] = (struct field_modify_info){4,
1606                                                 4 * idx,
1607                                                 MLX5_MODI_OUT_SIPV6_95_64};
1608                                 if (width < 32) {
1609                                         mask[idx] =
1610                                                 rte_cpu_to_be_32(0xffffffff >>
1611                                                                  (32 - width));
1612                                         width = 0;
1613                                 } else {
1614                                         mask[idx] = RTE_BE32(0xffffffff);
1615                                         width -= 32;
1616                                 }
1617                                 if (!width)
1618                                         break;
1619                                 ++idx;
1620                         }
1621                         info[idx] = (struct field_modify_info){4, 4 * idx,
1622                                                 MLX5_MODI_OUT_SIPV6_127_96};
1623                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1624                                                      (32 - width));
1625                 } else {
1626                         if (data->offset < 32)
1627                                 info[idx++] = (struct field_modify_info){4, 0,
1628                                                 MLX5_MODI_OUT_SIPV6_31_0};
1629                         if (data->offset < 64)
1630                                 info[idx++] = (struct field_modify_info){4, 0,
1631                                                 MLX5_MODI_OUT_SIPV6_63_32};
1632                         if (data->offset < 96)
1633                                 info[idx++] = (struct field_modify_info){4, 0,
1634                                                 MLX5_MODI_OUT_SIPV6_95_64};
1635                         if (data->offset < 128)
1636                                 info[idx++] = (struct field_modify_info){4, 0,
1637                                                 MLX5_MODI_OUT_SIPV6_127_96};
1638                 }
1639                 break;
1640         case RTE_FLOW_FIELD_IPV6_DST:
1641                 if (mask) {
1642                         if (data->offset < 32) {
1643                                 info[idx] = (struct field_modify_info){4,
1644                                                 4 * idx,
1645                                                 MLX5_MODI_OUT_DIPV6_31_0};
1646                                 if (width < 32) {
1647                                         mask[idx] =
1648                                                 rte_cpu_to_be_32(0xffffffff >>
1649                                                                  (32 - width));
1650                                         width = 0;
1651                                 } else {
1652                                         mask[idx] = RTE_BE32(0xffffffff);
1653                                         width -= 32;
1654                                 }
1655                                 if (!width)
1656                                         break;
1657                                 ++idx;
1658                         }
1659                         if (data->offset < 64) {
1660                                 info[idx] = (struct field_modify_info){4,
1661                                                 4 * idx,
1662                                                 MLX5_MODI_OUT_DIPV6_63_32};
1663                                 if (width < 32) {
1664                                         mask[idx] =
1665                                                 rte_cpu_to_be_32(0xffffffff >>
1666                                                                  (32 - width));
1667                                         width = 0;
1668                                 } else {
1669                                         mask[idx] = RTE_BE32(0xffffffff);
1670                                         width -= 32;
1671                                 }
1672                                 if (!width)
1673                                         break;
1674                                 ++idx;
1675                         }
1676                         if (data->offset < 96) {
1677                                 info[idx] = (struct field_modify_info){4,
1678                                                 4 * idx,
1679                                                 MLX5_MODI_OUT_DIPV6_95_64};
1680                                 if (width < 32) {
1681                                         mask[idx] =
1682                                                 rte_cpu_to_be_32(0xffffffff >>
1683                                                                  (32 - width));
1684                                         width = 0;
1685                                 } else {
1686                                         mask[idx] = RTE_BE32(0xffffffff);
1687                                         width -= 32;
1688                                 }
1689                                 if (!width)
1690                                         break;
1691                                 ++idx;
1692                         }
1693                         info[idx] = (struct field_modify_info){4, 4 * idx,
1694                                                 MLX5_MODI_OUT_DIPV6_127_96};
1695                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1696                                                      (32 - width));
1697                 } else {
1698                         if (data->offset < 32)
1699                                 info[idx++] = (struct field_modify_info){4, 0,
1700                                                 MLX5_MODI_OUT_DIPV6_31_0};
1701                         if (data->offset < 64)
1702                                 info[idx++] = (struct field_modify_info){4, 0,
1703                                                 MLX5_MODI_OUT_DIPV6_63_32};
1704                         if (data->offset < 96)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_95_64};
1707                         if (data->offset < 128)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_127_96};
1710                 }
1711                 break;
1712         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1713                 info[idx] = (struct field_modify_info){2, 0,
1714                                         MLX5_MODI_OUT_TCP_SPORT};
1715                 if (mask)
1716                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1717                 break;
1718         case RTE_FLOW_FIELD_TCP_PORT_DST:
1719                 info[idx] = (struct field_modify_info){2, 0,
1720                                         MLX5_MODI_OUT_TCP_DPORT};
1721                 if (mask)
1722                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1723                 break;
1724         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1725                 info[idx] = (struct field_modify_info){4, 0,
1726                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1727                 if (mask)
1728                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1729                                                      (32 - width));
1730                 break;
1731         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1732                 info[idx] = (struct field_modify_info){4, 0,
1733                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1734                 if (mask)
1735                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1736                                                      (32 - width));
1737                 break;
1738         case RTE_FLOW_FIELD_TCP_FLAGS:
1739                 info[idx] = (struct field_modify_info){2, 0,
1740                                         MLX5_MODI_OUT_TCP_FLAGS};
1741                 if (mask)
1742                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1743                 break;
1744         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1745                 info[idx] = (struct field_modify_info){2, 0,
1746                                         MLX5_MODI_OUT_UDP_SPORT};
1747                 if (mask)
1748                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1749                 break;
1750         case RTE_FLOW_FIELD_UDP_PORT_DST:
1751                 info[idx] = (struct field_modify_info){2, 0,
1752                                         MLX5_MODI_OUT_UDP_DPORT};
1753                 if (mask)
1754                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1755                 break;
1756         case RTE_FLOW_FIELD_VXLAN_VNI:
1757                 /* not supported yet */
1758                 break;
1759         case RTE_FLOW_FIELD_GENEVE_VNI:
1760                 /* not supported yet */
1761                 break;
1762         case RTE_FLOW_FIELD_GTP_TEID:
1763                 info[idx] = (struct field_modify_info){4, 0,
1764                                         MLX5_MODI_GTP_TEID};
1765                 if (mask)
1766                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1767                                                      (32 - width));
1768                 break;
1769         case RTE_FLOW_FIELD_TAG:
1770                 {
1771                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1772                                                    data->level, error);
1773                         if (reg < 0)
1774                                 return;
1775                         MLX5_ASSERT(reg != REG_NON);
1776                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1777                         info[idx] = (struct field_modify_info){4, 0,
1778                                                 reg_to_field[reg]};
1779                         if (mask)
1780                                 mask[idx] =
1781                                         rte_cpu_to_be_32(0xffffffff >>
1782                                                          (32 - width));
1783                 }
1784                 break;
1785         case RTE_FLOW_FIELD_MARK:
1786                 {
1787                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1788                                                        0, error);
1789                         if (reg < 0)
1790                                 return;
1791                         MLX5_ASSERT(reg != REG_NON);
1792                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1793                         info[idx] = (struct field_modify_info){4, 0,
1794                                                 reg_to_field[reg]};
1795                         if (mask)
1796                                 mask[idx] =
1797                                         rte_cpu_to_be_32(0xffffffff >>
1798                                                          (32 - width));
1799                 }
1800                 break;
1801         case RTE_FLOW_FIELD_META:
1802                 {
1803                         unsigned int xmeta = config->dv_xmeta_en;
1804                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1805                         if (reg < 0)
1806                                 return;
1807                         MLX5_ASSERT(reg != REG_NON);
1808                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1809                         if (xmeta == MLX5_XMETA_MODE_META16) {
1810                                 info[idx] = (struct field_modify_info){2, 0,
1811                                                         reg_to_field[reg]};
1812                                 if (mask)
1813                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1814                                                                 (16 - width));
1815                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1816                                 info[idx] = (struct field_modify_info){4, 0,
1817                                                         reg_to_field[reg]};
1818                                 if (mask)
1819                                         mask[idx] =
1820                                                 rte_cpu_to_be_32(0xffffffff >>
1821                                                                 (32 - width));
1822                         } else {
1823                                 MLX5_ASSERT(false);
1824                         }
1825                 }
1826                 break;
1827         case RTE_FLOW_FIELD_POINTER:
1828         case RTE_FLOW_FIELD_VALUE:
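                /*
                 * Immediate source: load the 64-bit value, inline or
                 * through the user pointer, and slice it into value[]
                 * words matching the mask[] words already populated
                 * for the destination field.
                 */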
1829                 if (data->field == RTE_FLOW_FIELD_POINTER)
1830                         memcpy(&val, (void *)(uintptr_t)data->value,
1831                                sizeof(uint64_t));
1832                 else
1833                         val = data->value;
1834                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1835                         if (mask[idx]) {
1836                                 if (dst_width == 48) {
1837                                         /* special case for MAC addresses */
1838                                         value[idx] = rte_cpu_to_be_16(val);
1839                                         val >>= 16;
1840                                         dst_width -= 16;
1841                                 } else if (dst_width > 16) {
1842                                         value[idx] = rte_cpu_to_be_32(val);
1843                                         val >>= 32;
1844                                 } else if (dst_width > 8) {
1845                                         value[idx] = rte_cpu_to_be_16(val);
1846                                         val >>= 16;
1847                                 } else {
1848                                         value[idx] = (uint8_t)val;
1849                                         val >>= 8;
1850                                 }
1851                                 if (!val)
1852                                         break;
1853                         }
1854                 }
1855                 break;
1856         default:
1857                 MLX5_ASSERT(false);
1858                 break;
1859         }
1860 }
1861
1862 /**
1863  * Convert modify_field action to DV specification.
1864  *
1865  * @param[in] dev
1866  *   Pointer to the rte_eth_dev structure.
1867  * @param[in,out] resource
1868  *   Pointer to the modify-header resource.
1869  * @param[in] action
1870  *   Pointer to action specification.
1871  * @param[in] attr
1872  *   Attributes of flow that includes this item.
1873  * @param[out] error
1874  *   Pointer to the error structure.
1875  *
1876  * @return
1877  *   0 on success, a negative errno value otherwise and rte_errno is set.
1878  */
1879 static int
1880 flow_dv_convert_action_modify_field
1881                         (struct rte_eth_dev *dev,
1882                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1883                          const struct rte_flow_action *action,
1884                          const struct rte_flow_attr *attr,
1885                          struct rte_flow_error *error)
1886 {
1887         struct mlx5_priv *priv = dev->data->dev_private;
1888         struct mlx5_dev_config *config = &priv->config;
1889         const struct rte_flow_action_modify_field *conf =
1890                 (const struct rte_flow_action_modify_field *)(action->conf);
1891         struct rte_flow_item item;
1892         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1893                                                                 {0, 0, 0} };
1894         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1895                                                                 {0, 0, 0} };
1896         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1897         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1898         uint32_t type;
1899         uint32_t dst_width = mlx5_flow_item_field_width(config,
1900                                                         conf->dst.field);
1901
1902         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1903             conf->src.field == RTE_FLOW_FIELD_VALUE) {
1904                 type = MLX5_MODIFICATION_TYPE_SET;
1905                 /* For SET fill the destination field (field) first. */
1906                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1907                         value, conf->width, dst_width, dev, attr, error);
1908                 /* Then copy immediate value from source as per mask. */
1909                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1910                         value, conf->width, dst_width, dev, attr, error);
1911                 item.spec = &value;
1912         } else {
1913                 type = MLX5_MODIFICATION_TYPE_COPY;
1914                 /* For COPY fill the destination field (dcopy) without mask. */
1915                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1916                         value, conf->width, dst_width, dev, attr, error);
1917                 /* Then construct the source field (field) with mask. */
1918                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1919                         value, conf->width, dst_width, dev, attr, error);
1920         }
1921         item.mask = &mask;
1922         return flow_dv_convert_modify_action(&item,
1923                         field, dcopy, resource, type, error);
1924 }
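/*
 * Illustrative usage sketch (application side, not driver code):
 * copying the 24-bit MARK into tag index 0 could be expressed with the
 * generic modify_field action roughly as:
 *
 *      struct rte_flow_action_modify_field conf = {
 *              .operation = RTE_FLOW_MODIFY_SET,
 *              .dst = { .field = RTE_FLOW_FIELD_TAG, .level = 0 },
 *              .src = { .field = RTE_FLOW_FIELD_MARK },
 *              .width = 24,
 *      };
 *
 * Since the source is a field rather than an immediate, the routine
 * above selects MLX5_MODIFICATION_TYPE_COPY and fills both dcopy and
 * field.
 */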
1925
1926 /**
1927  * Validate MARK item.
1928  *
1929  * @param[in] dev
1930  *   Pointer to the rte_eth_dev structure.
1931  * @param[in] item
1932  *   Item specification.
1933  * @param[in] attr
1934  *   Attributes of flow that includes this item.
1935  * @param[out] error
1936  *   Pointer to error structure.
1937  *
1938  * @return
1939  *   0 on success, a negative errno value otherwise and rte_errno is set.
1940  */
1941 static int
1942 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1943                            const struct rte_flow_item *item,
1944                            const struct rte_flow_attr *attr __rte_unused,
1945                            struct rte_flow_error *error)
1946 {
1947         struct mlx5_priv *priv = dev->data->dev_private;
1948         struct mlx5_dev_config *config = &priv->config;
1949         const struct rte_flow_item_mark *spec = item->spec;
1950         const struct rte_flow_item_mark *mask = item->mask;
1951         const struct rte_flow_item_mark nic_mask = {
1952                 .id = priv->sh->dv_mark_mask,
1953         };
1954         int ret;
1955
1956         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1957                 return rte_flow_error_set(error, ENOTSUP,
1958                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1959                                           "extended metadata feature"
1960                                           " isn't enabled");
1961         if (!mlx5_flow_ext_mreg_supported(dev))
1962                 return rte_flow_error_set(error, ENOTSUP,
1963                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1964                                           "extended metadata register"
1965                                           " isn't supported");
1966         if (!nic_mask.id)
1967                 return rte_flow_error_set(error, ENOTSUP,
1968                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1969                                           "extended metadata register"
1970                                           " isn't available");
1971         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1972         if (ret < 0)
1973                 return ret;
1974         if (!spec)
1975                 return rte_flow_error_set(error, EINVAL,
1976                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1977                                           item->spec,
1978                                           "data cannot be empty");
1979         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1980                 return rte_flow_error_set(error, EINVAL,
1981                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1982                                           &spec->id,
1983                                           "mark id exceeds the limit");
1984         if (!mask)
1985                 mask = &nic_mask;
1986         if (!mask->id)
1987                 return rte_flow_error_set(error, EINVAL,
1988                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1989                                         "mask cannot be zero");
1990
1991         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1992                                         (const uint8_t *)&nic_mask,
1993                                         sizeof(struct rte_flow_item_mark),
1994                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1995         if (ret < 0)
1996                 return ret;
1997         return 0;
1998 }
1999
2000 /**
2001  * Validate META item.
2002  *
2003  * @param[in] dev
2004  *   Pointer to the rte_eth_dev structure.
2005  * @param[in] item
2006  *   Item specification.
2007  * @param[in] attr
2008  *   Attributes of flow that includes this item.
2009  * @param[out] error
2010  *   Pointer to error structure.
2011  *
2012  * @return
2013  *   0 on success, a negative errno value otherwise and rte_errno is set.
2014  */
2015 static int
2016 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
2017                            const struct rte_flow_item *item,
2018                            const struct rte_flow_attr *attr,
2019                            struct rte_flow_error *error)
2020 {
2021         struct mlx5_priv *priv = dev->data->dev_private;
2022         struct mlx5_dev_config *config = &priv->config;
2023         const struct rte_flow_item_meta *spec = item->spec;
2024         const struct rte_flow_item_meta *mask = item->mask;
2025         struct rte_flow_item_meta nic_mask = {
2026                 .data = UINT32_MAX
2027         };
2028         int reg;
2029         int ret;
2030
2031         if (!spec)
2032                 return rte_flow_error_set(error, EINVAL,
2033                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2034                                           item->spec,
2035                                           "data cannot be empty");
2036         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2037                 if (!mlx5_flow_ext_mreg_supported(dev))
2038                         return rte_flow_error_set(error, ENOTSUP,
2039                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2040                                           "extended metadata register"
2041                                           " isn't supported");
2042                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2043                 if (reg < 0)
2044                         return reg;
2045                 if (reg == REG_NON)
2046                         return rte_flow_error_set(error, ENOTSUP,
2047                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2048                                         "unavailable extended metadata register");
2049                 if (reg == REG_B)
2050                         return rte_flow_error_set(error, ENOTSUP,
2051                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2052                                           "match on reg_b "
2053                                           "isn't supported");
2054                 if (reg != REG_A)
2055                         nic_mask.data = priv->sh->dv_meta_mask;
2056         } else {
2057                 if (attr->transfer)
2058                         return rte_flow_error_set(error, ENOTSUP,
2059                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2060                                         "extended metadata feature "
2061                                         "should be enabled when "
2062                                         "meta item is requested "
2063                                         "with E-Switch mode");
2064                 if (attr->ingress)
2065                         return rte_flow_error_set(error, ENOTSUP,
2066                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2067                                         "match on metadata for ingress "
2068                                         "is not supported in legacy "
2069                                         "metadata mode");
2070         }
2071         if (!mask)
2072                 mask = &rte_flow_item_meta_mask;
2073         if (!mask->data)
2074                 return rte_flow_error_set(error, EINVAL,
2075                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2076                                         "mask cannot be zero");
2077
2078         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2079                                         (const uint8_t *)&nic_mask,
2080                                         sizeof(struct rte_flow_item_meta),
2081                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2082         return ret;
2083 }
2084
2085 /**
2086  * Validate TAG item.
2087  *
2088  * @param[in] dev
2089  *   Pointer to the rte_eth_dev structure.
2090  * @param[in] item
2091  *   Item specification.
2092  * @param[in] attr
2093  *   Attributes of flow that includes this item.
2094  * @param[out] error
2095  *   Pointer to error structure.
2096  *
2097  * @return
2098  *   0 on success, a negative errno value otherwise and rte_errno is set.
2099  */
2100 static int
2101 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2102                           const struct rte_flow_item *item,
2103                           const struct rte_flow_attr *attr __rte_unused,
2104                           struct rte_flow_error *error)
2105 {
2106         const struct rte_flow_item_tag *spec = item->spec;
2107         const struct rte_flow_item_tag *mask = item->mask;
2108         const struct rte_flow_item_tag nic_mask = {
2109                 .data = RTE_BE32(UINT32_MAX),
2110                 .index = 0xff,
2111         };
2112         int ret;
2113
2114         if (!mlx5_flow_ext_mreg_supported(dev))
2115                 return rte_flow_error_set(error, ENOTSUP,
2116                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2117                                           "extended metadata register"
2118                                           " isn't supported");
2119         if (!spec)
2120                 return rte_flow_error_set(error, EINVAL,
2121                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2122                                           item->spec,
2123                                           "data cannot be empty");
2124         if (!mask)
2125                 mask = &rte_flow_item_tag_mask;
2126         if (!mask->data)
2127                 return rte_flow_error_set(error, EINVAL,
2128                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2129                                         "mask cannot be zero");
2130
2131         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2132                                         (const uint8_t *)&nic_mask,
2133                                         sizeof(struct rte_flow_item_tag),
2134                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2135         if (ret < 0)
2136                 return ret;
2137         if (mask->index != 0xff)
2138                 return rte_flow_error_set(error, EINVAL,
2139                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2140                                           "partial mask for tag index"
2141                                           " is not supported");
2142         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2143         if (ret < 0)
2144                 return ret;
2145         MLX5_ASSERT(ret != REG_NON);
2146         return 0;
2147 }
2148
2149 /**
2150  * Validate PORT_ID item.
2151  *
2152  * @param[in] dev
2153  *   Pointer to the rte_eth_dev structure.
2154  * @param[in] item
2155  *   Item specification.
2156  * @param[in] attr
2157  *   Attributes of flow that includes this item.
2158  * @param[in] item_flags
2159  *   Bit-fields that hold the items detected until now.
2160  * @param[out] error
2161  *   Pointer to error structure.
2162  *
2163  * @return
2164  *   0 on success, a negative errno value otherwise and rte_errno is set.
2165  */
2166 static int
2167 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2168                               const struct rte_flow_item *item,
2169                               const struct rte_flow_attr *attr,
2170                               uint64_t item_flags,
2171                               struct rte_flow_error *error)
2172 {
2173         const struct rte_flow_item_port_id *spec = item->spec;
2174         const struct rte_flow_item_port_id *mask = item->mask;
2175         const struct rte_flow_item_port_id switch_mask = {
2176                         .id = 0xffffffff,
2177         };
2178         struct mlx5_priv *esw_priv;
2179         struct mlx5_priv *dev_priv;
2180         int ret;
2181
2182         if (!attr->transfer)
2183                 return rte_flow_error_set(error, EINVAL,
2184                                           RTE_FLOW_ERROR_TYPE_ITEM,
2185                                           NULL,
2186                                           "match on port id is valid only"
2187                                           " when transfer flag is enabled");
2188         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2189                 return rte_flow_error_set(error, ENOTSUP,
2190                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2191                                           "multiple source ports are not"
2192                                           " supported");
2193         if (!mask)
2194                 mask = &switch_mask;
2195         if (mask->id != 0xffffffff)
2196                 return rte_flow_error_set(error, ENOTSUP,
2197                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2198                                            mask,
2199                                            "no support for partial mask on"
2200                                            " \"id\" field");
2201         ret = mlx5_flow_item_acceptable
2202                                 (item, (const uint8_t *)mask,
2203                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2204                                  sizeof(struct rte_flow_item_port_id),
2205                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2206         if (ret)
2207                 return ret;
2208         if (!spec)
2209                 return 0;
2210         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2211         if (!esw_priv)
2212                 return rte_flow_error_set(error, rte_errno,
2213                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2214                                           "failed to obtain E-Switch info for"
2215                                           " port");
2216         dev_priv = mlx5_dev_to_eswitch_info(dev);
2217         if (!dev_priv)
2218                 return rte_flow_error_set(error, rte_errno,
2219                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2220                                           NULL,
2221                                           "failed to obtain E-Switch info");
2222         if (esw_priv->domain_id != dev_priv->domain_id)
2223                 return rte_flow_error_set(error, EINVAL,
2224                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2225                                           "cannot match on a port from a"
2226                                           " different E-Switch");
2227         return 0;
2228 }
2229
2230 /**
2231  * Validate VLAN item.
2232  *
2233  * @param[in] item
2234  *   Item specification.
2235  * @param[in] item_flags
2236  *   Bit-fields that hold the items detected until now.
2237  * @param[in] dev
2238  *   Ethernet device flow is being created on.
2239  * @param[out] error
2240  *   Pointer to error structure.
2241  *
2242  * @return
2243  *   0 on success, a negative errno value otherwise and rte_errno is set.
2244  */
2245 static int
2246 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2247                            uint64_t item_flags,
2248                            struct rte_eth_dev *dev,
2249                            struct rte_flow_error *error)
2250 {
2251         const struct rte_flow_item_vlan *mask = item->mask;
2252         const struct rte_flow_item_vlan nic_mask = {
2253                 .tci = RTE_BE16(UINT16_MAX),
2254                 .inner_type = RTE_BE16(UINT16_MAX),
2255                 .has_more_vlan = 1,
2256         };
2257         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2258         int ret;
2259         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2260                                         MLX5_FLOW_LAYER_INNER_L4) :
2261                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2262                                         MLX5_FLOW_LAYER_OUTER_L4);
2263         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2264                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2265
2266         if (item_flags & vlanm)
2267                 return rte_flow_error_set(error, EINVAL,
2268                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2269                                           "multiple VLAN layers not supported");
2270         else if ((item_flags & l34m) != 0)
2271                 return rte_flow_error_set(error, EINVAL,
2272                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2273                                           "VLAN cannot follow L3/L4 layer");
2274         if (!mask)
2275                 mask = &rte_flow_item_vlan_mask;
2276         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2277                                         (const uint8_t *)&nic_mask,
2278                                         sizeof(struct rte_flow_item_vlan),
2279                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2280         if (ret)
2281                 return ret;
2282         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2283                 struct mlx5_priv *priv = dev->data->dev_private;
2284
2285                 if (priv->vmwa_context) {
2286                         /*
2287                          * Non-NULL context means we have a virtual machine
2288                          * and SR-IOV enabled, we have to create VLAN interface
2289                          * to make hypervisor to setup E-Switch vport
2290                          * context correctly. We avoid creating the multiple
2291                          * VLAN interfaces, so we cannot support VLAN tag mask.
2292                          */
2293                         return rte_flow_error_set(error, EINVAL,
2294                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2295                                                   item,
2296                                                   "VLAN tag mask is not"
2297                                                   " supported in virtual"
2298                                                   " environment");
2299                 }
2300         }
2301         return 0;
2302 }
2303
2304 /*
2305  * GTP flags are contained in 1 byte of the format:
2306  * -------------------------------------------
2307  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2308  * |-----------------------------------------|
2309  * | value | Version | PT | Res | E | S | PN |
2310  * -------------------------------------------
2311  *
2312  * Matching is supported only for GTP flags E, S, PN.
2313  */
2314 #define MLX5_GTP_FLAGS_MASK     0x07
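/* 0x07 covers E, S and PN: the three least significant bits of the flags byte. */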
2315
2316 /**
2317  * Validate GTP item.
2318  *
2319  * @param[in] dev
2320  *   Pointer to the rte_eth_dev structure.
2321  * @param[in] item
2322  *   Item specification.
2323  * @param[in] item_flags
2324  *   Bit-fields that hold the items detected until now.
2325  * @param[out] error
2326  *   Pointer to error structure.
2327  *
2328  * @return
2329  *   0 on success, a negative errno value otherwise and rte_errno is set.
2330  */
2331 static int
2332 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2333                           const struct rte_flow_item *item,
2334                           uint64_t item_flags,
2335                           struct rte_flow_error *error)
2336 {
2337         struct mlx5_priv *priv = dev->data->dev_private;
2338         const struct rte_flow_item_gtp *spec = item->spec;
2339         const struct rte_flow_item_gtp *mask = item->mask;
2340         const struct rte_flow_item_gtp nic_mask = {
2341                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2342                 .msg_type = 0xff,
2343                 .teid = RTE_BE32(0xffffffff),
2344         };
2345
2346         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2347                 return rte_flow_error_set(error, ENOTSUP,
2348                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2349                                           "GTP support is not enabled");
2350         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2351                 return rte_flow_error_set(error, ENOTSUP,
2352                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2353                                           "multiple tunnel layers not"
2354                                           " supported");
2355         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2356                 return rte_flow_error_set(error, EINVAL,
2357                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2358                                           "no outer UDP layer found");
2359         if (!mask)
2360                 mask = &rte_flow_item_gtp_mask;
2361         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2362                 return rte_flow_error_set(error, ENOTSUP,
2363                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                                           "Match is supported for GTP"
2365                                           " flags only");
2366         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2367                                          (const uint8_t *)&nic_mask,
2368                                          sizeof(struct rte_flow_item_gtp),
2369                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2370 }
2371
2372 /**
2373  * Validate GTP PSC item.
2374  *
2375  * @param[in] item
2376  *   Item specification.
2377  * @param[in] last_item
2378  *   Previously validated item in the pattern items.
2379  * @param[in] gtp_item
2380  *   Previous GTP item specification.
2381  * @param[in] attr
2382  *   Pointer to flow attributes.
2383  * @param[out] error
2384  *   Pointer to error structure.
2385  *
2386  * @return
2387  *   0 on success, a negative errno value otherwise and rte_errno is set.
2388  */
2389 static int
2390 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2391                               uint64_t last_item,
2392                               const struct rte_flow_item *gtp_item,
2393                               const struct rte_flow_attr *attr,
2394                               struct rte_flow_error *error)
2395 {
2396         const struct rte_flow_item_gtp *gtp_spec;
2397         const struct rte_flow_item_gtp *gtp_mask;
2398         const struct rte_flow_item_gtp_psc *spec;
2399         const struct rte_flow_item_gtp_psc *mask;
2400         const struct rte_flow_item_gtp_psc nic_mask = {
2401                 .pdu_type = 0xFF,
2402                 .qfi = 0xFF,
2403         };
2404
2405         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2406                 return rte_flow_error_set
2407                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2408                          "GTP PSC item must be preceded by a GTP item");
2409         gtp_spec = gtp_item->spec;
2410         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2411         /* When a GTP spec is present, the E flag must not be matched as zero. */
2412         if (gtp_spec &&
2413                 (gtp_mask->v_pt_rsv_flags &
2414                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2415                 return rte_flow_error_set
2416                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2417                          "GTP E flag must be 1 to match GTP PSC");
2418         /* Check the flow is not created in group zero. */
2419         if (!attr->transfer && !attr->group)
2420                 return rte_flow_error_set
2421                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2422                          "GTP PSC is not supported for group 0");
2423         /* The GTP PSC spec is optional; nothing more to validate without it. */
2424         if (!item->spec)
2425                 return 0;
2426         spec = item->spec;
2427         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2428         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2429                 return rte_flow_error_set
2430                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2431                          "PDU type should be smaller than 16");
2432         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2433                                          (const uint8_t *)&nic_mask,
2434                                          sizeof(struct rte_flow_item_gtp_psc),
2435                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2436 }
2437
2438 /**
2439  * Validate IPV4 item.
2440  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2441  * add specific validation of fragment_offset field,
2442  * add specific validation of fragment_offset field.
2443  *
2444  * @param[in] item
2445  *   Item specification.
2446  * @param[in] item_flags
2447  *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previously validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
2448  * @param[out] error
2449  *
2450  * @return
2451  *   0 on success, a negative errno value otherwise and rte_errno is set.
2452  */
2453 static int
2454 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2455                            uint64_t item_flags,
2456                            uint64_t last_item,
2457                            uint16_t ether_type,
2458                            struct rte_flow_error *error)
2459 {
2460         int ret;
2461         const struct rte_flow_item_ipv4 *spec = item->spec;
2462         const struct rte_flow_item_ipv4 *last = item->last;
2463         const struct rte_flow_item_ipv4 *mask = item->mask;
2464         rte_be16_t fragment_offset_spec = 0;
2465         rte_be16_t fragment_offset_last = 0;
2466         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2467                 .hdr = {
2468                         .src_addr = RTE_BE32(0xffffffff),
2469                         .dst_addr = RTE_BE32(0xffffffff),
2470                         .type_of_service = 0xff,
2471                         .fragment_offset = RTE_BE16(0xffff),
2472                         .next_proto_id = 0xff,
2473                         .time_to_live = 0xff,
2474                 },
2475         };
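        /*
         * fragment_offset is the 16-bit flags/offset word of the IPv4
         * header: bit 14 is DF, bit 13 is MF, and bits 12..0 hold the
         * 13-bit fragment offset; MLX5_IPV4_FRAG_OFFSET_MASK (MF plus
         * offset, 0x3fff) covers the parts matched below.
         */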
2476
2477         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2478                                            ether_type, &nic_ipv4_mask,
2479                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2480         if (ret < 0)
2481                 return ret;
2482         if (spec && mask)
2483                 fragment_offset_spec = spec->hdr.fragment_offset &
2484                                        mask->hdr.fragment_offset;
2485         if (!fragment_offset_spec)
2486                 return 0;
2487         /*
2488          * spec and mask are valid, enforce using full mask to make sure the
2489          * complete value is used correctly.
2490          */
2491         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2492                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2493                 return rte_flow_error_set(error, EINVAL,
2494                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2495                                           item, "must use full mask for"
2496                                           " fragment_offset");
2497         /*
2498          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2499          * indicating this is the first fragment of a fragmented packet.
2500          * This is not yet supported in MLX5; return an appropriate error.
2501          */
2502         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2503                 return rte_flow_error_set(error, ENOTSUP,
2504                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2505                                           "match on first fragment not "
2506                                           "supported");
2507         if (fragment_offset_spec && !last)
2508                 return rte_flow_error_set(error, ENOTSUP,
2509                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2510                                           "specified value not supported");
2511         /* spec and last are valid, validate the specified range. */
2512         fragment_offset_last = last->hdr.fragment_offset &
2513                                mask->hdr.fragment_offset;
2514         /*
2515          * Match on fragment_offset spec 0x2001 and last 0x3fff
2516          * means MF is 1 and frag-offset is > 0.
2517          * Such a packet is the second or a later fragment, excluding the last.
2518          * This is not yet supported in MLX5; return an appropriate
2519          * error message.
2520          */
2521         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2522             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2523                 return rte_flow_error_set(error, ENOTSUP,
2524                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2525                                           last, "match on following "
2526                                           "fragments not supported");
2527         /*
2528          * Match on fragment_offset spec 0x0001 and last 0x1fff
2529          * means MF is 0 and frag-offset is > 0.
2530          * Such a packet is the last fragment of a fragmented packet.
2531          * This is not yet supported in MLX5; return an appropriate
2532          * error message.
2533          */
2534         if (fragment_offset_spec == RTE_BE16(1) &&
2535             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2536                 return rte_flow_error_set(error, ENOTSUP,
2537                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2538                                           last, "match on last "
2539                                           "fragment not supported");
2540         /*
2541          * Match on fragment_offset spec 0x0001 and last 0x3fff
2542          * means MF and/or frag-offset is not 0.
2543          * This is a fragmented packet.
2544          * Other range values are invalid and rejected.
2545          */
2546         if (!(fragment_offset_spec == RTE_BE16(1) &&
2547               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2548                 return rte_flow_error_set(error, ENOTSUP,
2549                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2550                                           "specified range not supported");
2551         return 0;
2552 }
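
/*
 * Usage sketch (application side, illustrative only): the single
 * fragment_offset range the function above accepts -- spec 0x0001
 * with last 0x3fff under the full 0x3fff mask, i.e. "MF and/or
 * fragment offset is non-zero", which matches any fragment of a
 * fragmented packet.
 *
 *	const struct rte_flow_item_ipv4 frag_spec = {
 *		.hdr = { .fragment_offset = RTE_BE16(1) },
 *	};
 *	const struct rte_flow_item_ipv4 frag_last = {
 *		.hdr = { .fragment_offset = RTE_BE16(0x3fff) },
 *	};
 *	const struct rte_flow_item_ipv4 frag_mask = {
 *		.hdr = { .fragment_offset = RTE_BE16(0x3fff) },
 *	};
 *	const struct rte_flow_item ipv4_any_frag = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &frag_spec, .last = &frag_last, .mask = &frag_mask,
 *	};
 */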
2553
2554 /**
2555  * Validate IPV6 fragment extension item.
2556  *
2557  * @param[in] item
2558  *   Item specification.
2559  * @param[in] item_flags
2560  *   Bit-fields that hold the items detected until now.
2561  * @param[out] error
2562  *   Pointer to error structure.
2563  *
2564  * @return
2565  *   0 on success, a negative errno value otherwise and rte_errno is set.
2566  */
2567 static int
2568 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2569                                     uint64_t item_flags,
2570                                     struct rte_flow_error *error)
2571 {
2572         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2573         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2574         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2575         rte_be16_t frag_data_spec = 0;
2576         rte_be16_t frag_data_last = 0;
2577         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2578         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2579                                       MLX5_FLOW_LAYER_OUTER_L4;
2580         int ret = 0;
2581         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2582                 .hdr = {
2583                         .next_header = 0xff,
2584                         .frag_data = RTE_BE16(0xffff),
2585                 },
2586         };
2587
2588         if (item_flags & l4m)
2589                 return rte_flow_error_set(error, EINVAL,
2590                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2591                                           "ipv6 fragment extension item cannot "
2592                                           "follow L4 item.");
2593         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2594             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2595                 return rte_flow_error_set(error, EINVAL,
2596                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2597                                           "ipv6 fragment extension item must "
2598                                           "follow ipv6 item");
2599         if (spec && mask)
2600                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2601         if (!frag_data_spec)
2602                 return 0;
2603         /*
2604          * spec and mask are valid, enforce using full mask to make sure the
2605          * complete value is used correctly.
2606          */
2607         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2608                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2609                 return rte_flow_error_set(error, EINVAL,
2610                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2611                                           item, "must use full mask for"
2612                                           " frag_data");
2613         /*
2614          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2615          * This is the first fragment of a fragmented packet.
2616          */
2617         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2618                 return rte_flow_error_set(error, ENOTSUP,
2619                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2620                                           "match on first fragment not "
2621                                           "supported");
2622         if (frag_data_spec && !last)
2623                 return rte_flow_error_set(error, EINVAL,
2624                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2625                                           "specified value not supported");
2626         ret = mlx5_flow_item_acceptable
2627                                 (item, (const uint8_t *)mask,
2628                                  (const uint8_t *)&nic_mask,
2629                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2630                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2631         if (ret)
2632                 return ret;
2633         /* spec and last are valid, validate the specified range. */
2634         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2635         /*
2636          * Match on frag_data spec 0x0009 and last 0xfff9
2637          * means M is 1 and frag-offset is > 0.
2638          * Such a packet is the second or a later fragment, excluding the last.
2639          * This is not yet supported in MLX5; return an appropriate
2640          * error message.
2641          */
2642         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2643                                        RTE_IPV6_EHDR_MF_MASK) &&
2644             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2645                 return rte_flow_error_set(error, ENOTSUP,
2646                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2647                                           last, "match on following "
2648                                           "fragments not supported");
2649         /*
2650          * Match on frag_data spec 0x0008 and last 0xfff8
2651          * means M is 0 and frag-offset is > 0.
2652          * Such a packet is the last fragment of a fragmented packet.
2653          * This is not yet supported in MLX5; return an appropriate
2654          * error message.
2655          */
2656         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2657             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2658                 return rte_flow_error_set(error, ENOTSUP,
2659                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2660                                           last, "match on last "
2661                                           "fragment not supported");
2662         /* Other range values are invalid and rejected. */
2663         return rte_flow_error_set(error, EINVAL,
2664                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2665                                   "specified range not supported");
2666 }
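
/*
 * Usage sketch (application side, illustrative only): any explicit
 * frag_data match ends in one of the rejections above, so the
 * practical way to match fragmented IPv6 traffic is to give the item
 * with no spec at all -- presence of the fragment extension header is
 * matched and the function returns early with success.
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */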
2667
2668 /**
2669  * Validate ASO CT item.
2670  *
2671  * @param[in] dev
2672  *   Pointer to the rte_eth_dev structure.
2673  * @param[in] item
2674  *   Item specification.
2675  * @param[in] item_flags
2676  *   Pointer to the bit-fields that hold the items detected until now.
2677  * @param[out] error
2678  *   Pointer to error structure.
2679  *
2680  * @return
2681  *   0 on success, a negative errno value otherwise and rte_errno is set.
2682  */
2683 static int
2684 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2685                              const struct rte_flow_item *item,
2686                              uint64_t *item_flags,
2687                              struct rte_flow_error *error)
2688 {
2689         const struct rte_flow_item_conntrack *spec = item->spec;
2690         const struct rte_flow_item_conntrack *mask = item->mask;
2691         RTE_SET_USED(dev);
2692         uint32_t flags;
2693
2694         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2695                 return rte_flow_error_set(error, EINVAL,
2696                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2697                                           "Only one CT is supported");
2698         if (!mask)
2699                 mask = &rte_flow_item_conntrack_mask;
        /* The spec is mandatory here: its flags are dereferenced below. */
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "CT item spec cannot be NULL");
2700         flags = spec->flags & mask->flags;
2701         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2702             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2703              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2704              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2705                 return rte_flow_error_set(error, EINVAL,
2706                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2707                                           "Conflicting status bits");
2708         /* State change also needs to be considered. */
2709         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2710         return 0;
2711 }
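
/*
 * Usage sketch (application side, illustrative only): a conntrack
 * item matching packets the CT engine classified as valid. Per the
 * check above, VALID must not be combined with INVALID, BAD or
 * DISABLED in the same spec.
 *
 *	const struct rte_flow_item_conntrack ct_spec = {
 *		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *	};
 *	const struct rte_flow_item ct_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
 *		.spec = &ct_spec,
 *		.mask = &rte_flow_item_conntrack_mask,
 *	};
 */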
2712
2713 /**
2714  * Validate the pop VLAN action.
2715  *
2716  * @param[in] dev
2717  *   Pointer to the rte_eth_dev structure.
2718  * @param[in] action_flags
2719  *   Holds the actions detected until now.
2720  * @param[in] action
2721  *   Pointer to the pop vlan action.
2722  * @param[in] item_flags
2723  *   The items found in this flow rule.
2724  * @param[in] attr
2725  *   Pointer to flow attributes.
2726  * @param[out] error
2727  *   Pointer to error structure.
2728  *
2729  * @return
2730  *   0 on success, a negative errno value otherwise and rte_errno is set.
2731  */
2732 static int
2733 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2734                                  uint64_t action_flags,
2735                                  const struct rte_flow_action *action,
2736                                  uint64_t item_flags,
2737                                  const struct rte_flow_attr *attr,
2738                                  struct rte_flow_error *error)
2739 {
2740         const struct mlx5_priv *priv = dev->data->dev_private;
2741
2742         (void)action;
2743         (void)attr;
2744         if (!priv->sh->pop_vlan_action)
2745                 return rte_flow_error_set(error, ENOTSUP,
2746                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2747                                           NULL,
2748                                           "pop vlan action is not supported");
2749         if (attr->egress)
2750                 return rte_flow_error_set(error, ENOTSUP,
2751                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2752                                           NULL,
2753                                           "pop vlan action not supported for "
2754                                           "egress");
2755         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2756                 return rte_flow_error_set(error, ENOTSUP,
2757                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2758                                           "no support for multiple VLAN "
2759                                           "actions");
2760         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2761         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2762             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2763                 return rte_flow_error_set(error, ENOTSUP,
2764                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2765                                           NULL,
2766                                           "cannot pop vlan after decap without "
2767                                           "match on inner vlan in the flow");
2768         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2769         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2770             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2771                 return rte_flow_error_set(error, ENOTSUP,
2772                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2773                                           NULL,
2774                                           "cannot pop vlan without a "
2775                                           "match on (outer) vlan in the flow");
2776         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2777                 return rte_flow_error_set(error, EINVAL,
2778                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2779                                           "wrong action order, port_id should "
2780                                           "be after pop VLAN action");
2781         if (!attr->transfer && priv->representor)
2782                 return rte_flow_error_set(error, ENOTSUP,
2783                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2784                                           "pop vlan action for VF representor "
2785                                           "not supported on NIC table");
2786         return 0;
2787 }
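
/*
 * Usage sketch (application side, illustrative only): popping the
 * outer VLAN on ingress. The pattern must match on the VLAN being
 * removed, and a port_id fate action would have to come after the
 * pop, per the checks above.
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */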
2788
2789 /**
2790  * Get VLAN default info from vlan match info.
2791  *
2792  * @param[in] items
2793  *   The list of item specifications.
2794  * @param[out] vlan
2795  *   Pointer to the VLAN info to fill.
2799  */
2800 static void
2801 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2802                                   struct rte_vlan_hdr *vlan)
2803 {
2804         const struct rte_flow_item_vlan nic_mask = {
2805                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2806                                 MLX5DV_FLOW_VLAN_VID_MASK),
2807                 .inner_type = RTE_BE16(0xffff),
2808         };
2809
2810         if (items == NULL)
2811                 return;
2812         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2813                 int type = items->type;
2814
2815                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2816                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2817                         break;
2818         }
2819         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2820                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2821                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2822
2823                 /* If VLAN item in pattern doesn't contain data, return here. */
2824                 if (!vlan_v)
2825                         return;
2826                 if (!vlan_m)
2827                         vlan_m = &nic_mask;
2828                 /* Only full-match values are accepted. */
2829                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2830                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2831                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2832                         vlan->vlan_tci |=
2833                                 rte_be_to_cpu_16(vlan_v->tci &
2834                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2835                 }
2836                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2837                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2838                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2839                         vlan->vlan_tci |=
2840                                 rte_be_to_cpu_16(vlan_v->tci &
2841                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2842                 }
2843                 if (vlan_m->inner_type == nic_mask.inner_type)
2844                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2845                                                            vlan_m->inner_type);
2846         }
2847 }
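
/*
 * Example of what the helper above extracts (values are illustrative):
 * with a fully masked TCI -- PCP mask 0xe000 plus VID mask 0x0fff --
 * the PCP and VID below become the defaults for a later push VLAN
 * action; partially masked fields are left untouched.
 *
 *	const struct rte_flow_item_vlan vlan_spec = {
 *		.tci = RTE_BE16((3 << 13) | 100),
 *	};
 *	const struct rte_flow_item_vlan vlan_mask = {
 *		.tci = RTE_BE16(0xefff),
 *	};
 */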
2848
2849 /**
2850  * Validate the push VLAN action.
2851  *
2852  * @param[in] dev
2853  *   Pointer to the rte_eth_dev structure.
2854  * @param[in] action_flags
2855  *   Holds the actions detected until now.
2856  * @param[in] vlan_m
2857  *   Pointer to the matched VLAN item mask in the pattern, NULL if none.
2858  * @param[in] action
2859  *   Pointer to the action structure.
2860  * @param[in] attr
2861  *   Pointer to flow attributes
2862  * @param[out] error
2863  *   Pointer to error structure.
2864  *
2865  * @return
2866  *   0 on success, a negative errno value otherwise and rte_errno is set.
2867  */
2868 static int
2869 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2870                                   uint64_t action_flags,
2871                                   const struct rte_flow_item_vlan *vlan_m,
2872                                   const struct rte_flow_action *action,
2873                                   const struct rte_flow_attr *attr,
2874                                   struct rte_flow_error *error)
2875 {
2876         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2877         const struct mlx5_priv *priv = dev->data->dev_private;
2878
2879         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2880             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2881                 return rte_flow_error_set(error, EINVAL,
2882                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2883                                           "invalid vlan ethertype");
2884         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2885                 return rte_flow_error_set(error, EINVAL,
2886                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2887                                           "wrong action order, port_id should "
2888                                           "be after push VLAN");
2889         if (!attr->transfer && priv->representor)
2890                 return rte_flow_error_set(error, ENOTSUP,
2891                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2892                                           "push vlan action for VF representor "
2893                                           "not supported on NIC table");
2894         if (vlan_m &&
2895             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2896             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2897                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2898             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2899             !(mlx5_flow_find_action
2900                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2901                 return rte_flow_error_set(error, EINVAL,
2902                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2903                                           "not full match mask on VLAN PCP and "
2904                                           "there is no of_set_vlan_pcp action, "
2905                                           "push VLAN action cannot figure out "
2906                                           "PCP value");
2907         if (vlan_m &&
2908             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2909             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2910                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2911             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2912             !(mlx5_flow_find_action
2913                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2914                 return rte_flow_error_set(error, EINVAL,
2915                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2916                                           "not full match mask on VLAN VID and "
2917                                           "there is no of_set_vlan_vid action, "
2918                                           "push VLAN action cannot figure out "
2919                                           "VID value");
2920         (void)attr;
2921         return 0;
2922 }
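
/*
 * Usage sketch (application side, illustrative only): pushing a VLAN
 * header. When the pattern does not fully specify PCP and VID, the
 * checks above require explicit of_set_vlan_pcp/of_set_vlan_vid
 * actions after the push so the new header can be filled in.
 *
 *	const struct rte_flow_action_of_push_vlan push = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 *	const struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *	const struct rte_flow_action_of_set_vlan_vid vid = {
 *		.vlan_vid = RTE_BE16(100),
 *	};
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */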
2923
2924 /**
2925  * Validate the set VLAN PCP.
2926  *
2927  * @param[in] action_flags
2928  *   Holds the actions detected until now.
2929  * @param[in] actions
2930  *   Pointer to the list of actions remaining in the flow rule.
2931  * @param[out] error
2932  *   Pointer to error structure.
2933  *
2934  * @return
2935  *   0 on success, a negative errno value otherwise and rte_errno is set.
2936  */
2937 static int
2938 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2939                                      const struct rte_flow_action actions[],
2940                                      struct rte_flow_error *error)
2941 {
2942         const struct rte_flow_action *action = actions;
2943         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2944
2945         if (conf->vlan_pcp > 7)
2946                 return rte_flow_error_set(error, EINVAL,
2947                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2948                                           "VLAN PCP value is too big");
2949         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2950                 return rte_flow_error_set(error, ENOTSUP,
2951                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2952                                           "set VLAN PCP action must follow "
2953                                           "the push VLAN action");
2954         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2955                 return rte_flow_error_set(error, ENOTSUP,
2956                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2957                                           "Multiple VLAN PCP modifications are "
2958                                           "not supported");
2959         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2960                 return rte_flow_error_set(error, EINVAL,
2961                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2962                                           "wrong action order, port_id should "
2963                                           "be after set VLAN PCP");
2964         return 0;
2965 }
2966
2967 /**
2968  * Validate the set VLAN VID.
2969  *
2970  * @param[in] item_flags
2971  *   Holds the items detected in this rule.
2972  * @param[in] action_flags
2973  *   Holds the actions detected until now.
2974  * @param[in] actions
2975  *   Pointer to the list of actions remaining in the flow rule.
2976  * @param[out] error
2977  *   Pointer to error structure.
2978  *
2979  * @return
2980  *   0 on success, a negative errno value otherwise and rte_errno is set.
2981  */
2982 static int
2983 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2984                                      uint64_t action_flags,
2985                                      const struct rte_flow_action actions[],
2986                                      struct rte_flow_error *error)
2987 {
2988         const struct rte_flow_action *action = actions;
2989         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2990
2991         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2992                 return rte_flow_error_set(error, EINVAL,
2993                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2994                                           "VLAN VID value is too big");
2995         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2996             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2997                 return rte_flow_error_set(error, ENOTSUP,
2998                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2999                                           "set VLAN VID action must follow push"
3000                                           " VLAN action or match on VLAN item");
3001         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3002                 return rte_flow_error_set(error, ENOTSUP,
3003                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3004                                           "Multiple VLAN VID modifications are "
3005                                           "not supported");
3006         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3007                 return rte_flow_error_set(error, EINVAL,
3008                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3009                                           "wrong action order, port_id should "
3010                                           "be after set VLAN VID");
3011         return 0;
3012 }
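
/*
 * Usage sketch (application side, illustrative only): rewriting the
 * VID of an existing VLAN header. Without a preceding push VLAN
 * action this is only valid when the pattern matches on a VLAN item,
 * per the check above.
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action_of_set_vlan_vid vid = {
 *		.vlan_vid = RTE_BE16(200),
 *	};
 *	const struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */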
3013
3014 /**
3015  * Validate the FLAG action.
3016  *
3017  * @param[in] dev
3018  *   Pointer to the rte_eth_dev structure.
3019  * @param[in] action_flags
3020  *   Holds the actions detected until now.
3021  * @param[in] attr
3022  *   Pointer to flow attributes
3023  * @param[out] error
3024  *   Pointer to error structure.
3025  *
3026  * @return
3027  *   0 on success, a negative errno value otherwise and rte_errno is set.
3028  */
3029 static int
3030 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3031                              uint64_t action_flags,
3032                              const struct rte_flow_attr *attr,
3033                              struct rte_flow_error *error)
3034 {
3035         struct mlx5_priv *priv = dev->data->dev_private;
3036         struct mlx5_dev_config *config = &priv->config;
3037         int ret;
3038
3039         /* Fall back if no extended metadata register support. */
3040         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3041                 return mlx5_flow_validate_action_flag(action_flags, attr,
3042                                                       error);
3043         /* Extensive metadata mode requires registers. */
3044         if (!mlx5_flow_ext_mreg_supported(dev))
3045                 return rte_flow_error_set(error, ENOTSUP,
3046                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3047                                           "no metadata registers "
3048                                           "to support flag action");
3049         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3050                 return rte_flow_error_set(error, ENOTSUP,
3051                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3052                                           "extended metadata register"
3053                                           " isn't available");
3054         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3055         if (ret < 0)
3056                 return ret;
3057         MLX5_ASSERT(ret > 0);
3058         if (action_flags & MLX5_FLOW_ACTION_MARK)
3059                 return rte_flow_error_set(error, EINVAL,
3060                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3061                                           "can't mark and flag in same flow");
3062         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3063                 return rte_flow_error_set(error, EINVAL,
3064                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3065                                           "can't have 2 flag"
3066                                           " actions in same flow");
3067         return 0;
3068 }
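
/*
 * Usage sketch (application side, illustrative only): FLAG takes no
 * configuration; matched packets are delivered with PKT_RX_FDIR set
 * in the mbuf ol_flags. In extended metadata mode it shares a
 * register with MARK, so the two cannot appear in one flow, per the
 * checks above.
 *
 *	const struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_FLAG },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */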
3069
3070 /**
3071  * Validate MARK action.
3072  *
3073  * @param[in] dev
3074  *   Pointer to the rte_eth_dev structure.
3075  * @param[in] action
3076  *   Pointer to action.
3077  * @param[in] action_flags
3078  *   Holds the actions detected until now.
3079  * @param[in] attr
3080  *   Pointer to flow attributes
3081  * @param[out] error
3082  *   Pointer to error structure.
3083  *
3084  * @return
3085  *   0 on success, a negative errno value otherwise and rte_errno is set.
3086  */
3087 static int
3088 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3089                              const struct rte_flow_action *action,
3090                              uint64_t action_flags,
3091                              const struct rte_flow_attr *attr,
3092                              struct rte_flow_error *error)
3093 {
3094         struct mlx5_priv *priv = dev->data->dev_private;
3095         struct mlx5_dev_config *config = &priv->config;
3096         const struct rte_flow_action_mark *mark = action->conf;
3097         int ret;
3098
3099         if (is_tunnel_offload_active(dev))
3100                 return rte_flow_error_set(error, ENOTSUP,
3101                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3102                                           "no mark action "
3103                                           "if tunnel offload active");
3104         /* Fall back if no extended metadata register support. */
3105         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3106                 return mlx5_flow_validate_action_mark(action, action_flags,
3107                                                       attr, error);
3108         /* Extensive metadata mode requires registers. */
3109         if (!mlx5_flow_ext_mreg_supported(dev))
3110                 return rte_flow_error_set(error, ENOTSUP,
3111                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3112                                           "no metadata registers "
3113                                           "to support mark action");
3114         if (!priv->sh->dv_mark_mask)
3115                 return rte_flow_error_set(error, ENOTSUP,
3116                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3117                                           "extended metadata register"
3118                                           " isn't available");
3119         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3120         if (ret < 0)
3121                 return ret;
3122         MLX5_ASSERT(ret > 0);
3123         if (!mark)
3124                 return rte_flow_error_set(error, EINVAL,
3125                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3126                                           "configuration cannot be null");
3127         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3128                 return rte_flow_error_set(error, EINVAL,
3129                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3130                                           &mark->id,
3131                                           "mark id exceeds the limit");
3132         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3133                 return rte_flow_error_set(error, EINVAL,
3134                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3135                                           "can't flag and mark in same flow");
3136         if (action_flags & MLX5_FLOW_ACTION_MARK)
3137                 return rte_flow_error_set(error, EINVAL,
3138                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3139                                           "can't have 2 mark actions in same"
3140                                           " flow");
3141         return 0;
3142 }
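
/*
 * Usage sketch (application side, illustrative only): a MARK action
 * whose id must stay below MLX5_FLOW_MARK_MAX masked by dv_mark_mask;
 * the value is delivered in mbuf hash.fdir.hi with PKT_RX_FDIR_ID set
 * on receive.
 *
 *	const struct rte_flow_action_mark mark = { .id = 42 };
 *	const struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */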
3143
3144 /**
3145  * Validate SET_META action.
3146  *
3147  * @param[in] dev
3148  *   Pointer to the rte_eth_dev structure.
3149  * @param[in] action
3150  *   Pointer to the action structure.
3151  * @param[in] action_flags
3152  *   Holds the actions detected until now.
3153  * @param[in] attr
3154  *   Pointer to flow attributes
3155  * @param[out] error
3156  *   Pointer to error structure.
3157  *
3158  * @return
3159  *   0 on success, a negative errno value otherwise and rte_errno is set.
3160  */
3161 static int
3162 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3163                                  const struct rte_flow_action *action,
3164                                  uint64_t action_flags __rte_unused,
3165                                  const struct rte_flow_attr *attr,
3166                                  struct rte_flow_error *error)
3167 {
3168         const struct rte_flow_action_set_meta *conf;
3169         uint32_t nic_mask = UINT32_MAX;
3170         int reg;
3171
3172         if (!mlx5_flow_ext_mreg_supported(dev))
3173                 return rte_flow_error_set(error, ENOTSUP,
3174                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3175                                           "extended metadata register"
3176                                           " isn't supported");
3177         reg = flow_dv_get_metadata_reg(dev, attr, error);
3178         if (reg < 0)
3179                 return reg;
3180         if (reg == REG_NON)
3181                 return rte_flow_error_set(error, ENOTSUP,
3182                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3183                                           "unavailable extended metadata register");
3184         if (reg != REG_A && reg != REG_B) {
3185                 struct mlx5_priv *priv = dev->data->dev_private;
3186
3187                 nic_mask = priv->sh->dv_meta_mask;
3188         }
3189         if (!(action->conf))
3190                 return rte_flow_error_set(error, EINVAL,
3191                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3192                                           "configuration cannot be null");
3193         conf = (const struct rte_flow_action_set_meta *)action->conf;
3194         if (!conf->mask)
3195                 return rte_flow_error_set(error, EINVAL,
3196                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3197                                           "zero mask doesn't have any effect");
3198         if (conf->mask & ~nic_mask)
3199                 return rte_flow_error_set(error, EINVAL,
3200                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3201                                           "metadata must be within reg C0");
3202         return 0;
3203 }
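
/*
 * Usage sketch (application side, illustrative only): SET_META writes
 * the metadata register selected by the dv_xmeta_en devarg; a zero
 * mask is rejected above and, outside REG_A/REG_B, the mask must fit
 * within dv_meta_mask (reg C0). Values are arbitrary examples.
 *
 *	const struct rte_flow_action_set_meta meta = {
 *		.data = 0x1234,
 *		.mask = 0xffff,
 *	};
 *	const struct rte_flow_action set_meta_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_META,
 *		.conf = &meta,
 *	};
 */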
3204
3205 /**
3206  * Validate SET_TAG action.
3207  *
3208  * @param[in] dev
3209  *   Pointer to the rte_eth_dev structure.
3210  * @param[in] action
3211  *   Pointer to the action structure.
3212  * @param[in] action_flags
3213  *   Holds the actions detected until now.
3214  * @param[in] attr
3215  *   Pointer to flow attributes
3216  * @param[out] error
3217  *   Pointer to error structure.
3218  *
3219  * @return
3220  *   0 on success, a negative errno value otherwise and rte_errno is set.
3221  */
3222 static int
3223 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3224                                 const struct rte_flow_action *action,
3225                                 uint64_t action_flags,
3226                                 const struct rte_flow_attr *attr,
3227                                 struct rte_flow_error *error)
3228 {
3229         const struct rte_flow_action_set_tag *conf;
3230         const uint64_t terminal_action_flags =
3231                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3232                 MLX5_FLOW_ACTION_RSS;
3233         int ret;
3234
3235         if (!mlx5_flow_ext_mreg_supported(dev))
3236                 return rte_flow_error_set(error, ENOTSUP,
3237                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3238                                           "extensive metadata register"
3239                                           " isn't supported");
3240         if (!(action->conf))
3241                 return rte_flow_error_set(error, EINVAL,
3242                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3243                                           "configuration cannot be null");
3244         conf = (const struct rte_flow_action_set_tag *)action->conf;
3245         if (!conf->mask)
3246                 return rte_flow_error_set(error, EINVAL,
3247                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3248                                           "zero mask doesn't have any effect");
3249         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3250         if (ret < 0)
3251                 return ret;
3252         if (!attr->transfer && attr->ingress &&
3253             (action_flags & terminal_action_flags))
3254                 return rte_flow_error_set(error, EINVAL,
3255                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3256                                           "set_tag has no effect"
3257                                           " with terminal actions");
3258         return 0;
3259 }
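
/*
 * Usage sketch (application side, illustrative only): SET_TAG is
 * typically paired with a JUMP, with a TAG match in the target group
 * reading the value back -- on non-transfer ingress the check above
 * rejects it next to terminal (drop/queue/RSS) actions, where the
 * written tag would have no reader.
 *
 *	const struct rte_flow_action_set_tag tag = {
 *		.data = 0xa5,
 *		.mask = 0xff,
 *		.index = 0,
 *	};
 *	const struct rte_flow_action_jump jump = { .group = 1 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_SET_TAG, .conf = &tag },
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */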
3260
3261 /**
3262  * Check if action counter is shared by either old or new mechanism.
3263  *
3264  * @param[in] action
3265  *   Pointer to the action structure.
3266  *
3267  * @return
3268  *   True when counter is shared, false otherwise.
3269  */
3270 static inline bool
3271 is_shared_action_count(const struct rte_flow_action *action)
3272 {
3273         const struct rte_flow_action_count *count =
3274                         (const struct rte_flow_action_count *)action->conf;
3275
3276         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3277                 return true;
3278         return !!(count && count->shared);
3279 }
3280
3281 /**
3282  * Validate count action.
3283  *
3284  * @param[in] dev
3285  *   Pointer to rte_eth_dev structure.
3286  * @param[in] shared
3287  *   Indicator if action is shared.
3288  * @param[in] action_flags
3289  *   Holds the actions detected until now.
3290  * @param[out] error
3291  *   Pointer to error structure.
3292  *
3293  * @return
3294  *   0 on success, a negative errno value otherwise and rte_errno is set.
3295  */
3296 static int
3297 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3298                               uint64_t action_flags,
3299                               struct rte_flow_error *error)
3300 {
3301         struct mlx5_priv *priv = dev->data->dev_private;
3302
3303         if (!priv->config.devx)
3304                 goto notsup_err;
3305         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3306                 return rte_flow_error_set(error, EINVAL,
3307                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3308                                           "duplicate count actions set");
3309         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3310             !priv->sh->flow_hit_aso_en)
3311                 return rte_flow_error_set(error, EINVAL,
3312                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3313                                           "old age and shared count combination is not supported");
3314 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3315         return 0;
3316 #endif
3317 notsup_err:
3318         return rte_flow_error_set
3319                       (error, ENOTSUP,
3320                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3321                        NULL,
3322                        "count action not supported");
3323 }
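
/*
 * Usage sketch (application side, illustrative only): a shared
 * counter using the pre-indirect-action "shared" bit of this rte_flow
 * revision, read back with rte_flow_query(); port_id and flow are
 * assumed to come from the usual rte_flow_create() path.
 *
 *	const struct rte_flow_action_count count = { .shared = 1, .id = 7 };
 *	const struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = &count,
 *	};
 *	struct rte_flow_query_count stats = { .reset = 0 };
 *	struct rte_flow_error err;
 *	int ret = rte_flow_query(port_id, flow, &count_action, &stats, &err);
 */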
3324
3325 /**
3326  * Validate the L2 encap action.
3327  *
3328  * @param[in] dev
3329  *   Pointer to the rte_eth_dev structure.
3330  * @param[in] action_flags
3331  *   Holds the actions detected until now.
3332  * @param[in] action
3333  *   Pointer to the action structure.
3334  * @param[in] attr
3335  *   Pointer to flow attributes.
3336  * @param[out] error
3337  *   Pointer to error structure.
3338  *
3339  * @return
3340  *   0 on success, a negative errno value otherwise and rte_errno is set.
3341  */
3342 static int
3343 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3344                                  uint64_t action_flags,
3345                                  const struct rte_flow_action *action,
3346                                  const struct rte_flow_attr *attr,
3347                                  struct rte_flow_error *error)
3348 {
3349         const struct mlx5_priv *priv = dev->data->dev_private;
3350
3351         if (!(action->conf))
3352                 return rte_flow_error_set(error, EINVAL,
3353                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3354                                           "configuration cannot be null");
3355         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3356                 return rte_flow_error_set(error, EINVAL,
3357                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3358                                           "can only have a single encap action "
3359                                           "in a flow");
3360         if (!attr->transfer && priv->representor)
3361                 return rte_flow_error_set(error, ENOTSUP,
3362                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3363                                           "encap action for VF representor "
3364                                           "not supported on NIC table");
3365         return 0;
3366 }
3367
3368 /**
3369  * Validate a decap action.
3370  *
3371  * @param[in] dev
3372  *   Pointer to the rte_eth_dev structure.
3373  * @param[in] action_flags
3374  *   Holds the actions detected until now.
3375  * @param[in] action
3376  *   Pointer to the action structure.
3377  * @param[in] item_flags
3378  *   Holds the items detected.
3379  * @param[in] attr
3380  *   Pointer to flow attributes
3381  * @param[out] error
3382  *   Pointer to error structure.
3383  *
3384  * @return
3385  *   0 on success, a negative errno value otherwise and rte_errno is set.
3386  */
3387 static int
3388 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3389                               uint64_t action_flags,
3390                               const struct rte_flow_action *action,
3391                               const uint64_t item_flags,
3392                               const struct rte_flow_attr *attr,
3393                               struct rte_flow_error *error)
3394 {
3395         const struct mlx5_priv *priv = dev->data->dev_private;
3396
3397         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3398             !priv->config.decap_en)
3399                 return rte_flow_error_set(error, ENOTSUP,
3400                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3401                                           "decap is not enabled");
3402         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3403                 return rte_flow_error_set(error, ENOTSUP,
3404                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3405                                           action_flags &
3406                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3407                                           "have a single decap action" : "decap "
3408                                           "after encap is not supported");
3409         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3410                 return rte_flow_error_set(error, EINVAL,
3411                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3412                                           "can't have decap action after"
3413                                           " modify action");
3414         if (attr->egress)
3415                 return rte_flow_error_set(error, ENOTSUP,
3416                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3417                                           NULL,
3418                                           "decap action not supported for "
3419                                           "egress");
3420         if (!attr->transfer && priv->representor)
3421                 return rte_flow_error_set(error, ENOTSUP,
3422                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3423                                           "decap action for VF representor "
3424                                           "not supported on NIC table");
3425         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3426             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3427                 return rte_flow_error_set(error, ENOTSUP,
3428                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3429                                 "VXLAN item should be present for VXLAN decap");
3430         return 0;
3431 }
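
/*
 * Usage sketch (application side, illustrative only): VXLAN decap
 * requires the VXLAN item to be present in the pattern, per the last
 * check above.
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */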
3432
3433 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3434
3435 /**
3436  * Validate the raw encap and decap actions.
3437  *
3438  * @param[in] dev
3439  *   Pointer to the rte_eth_dev structure.
3440  * @param[in] decap
3441  *   Pointer to the decap action.
3442  * @param[in] encap
3443  *   Pointer to the encap action.
3444  * @param[in] attr
3445  *   Pointer to flow attributes
3446  * @param[in/out] action_flags
3447  *   Holds the actions detected until now.
3448  * @param[out] actions_n
3449  *   pointer to the number of actions counter.
3450  * @param[in] action
3451  *   Pointer to the action structure.
3452  * @param[in] item_flags
3453  *   Holds the items detected.
3454  * @param[out] error
3455  *   Pointer to error structure.
3456  *
3457  * @return
3458  *   0 on success, a negative errno value otherwise and rte_errno is set.
3459  */
3460 static int
3461 flow_dv_validate_action_raw_encap_decap
3462         (struct rte_eth_dev *dev,
3463          const struct rte_flow_action_raw_decap *decap,
3464          const struct rte_flow_action_raw_encap *encap,
3465          const struct rte_flow_attr *attr, uint64_t *action_flags,
3466          int *actions_n, const struct rte_flow_action *action,
3467          uint64_t item_flags, struct rte_flow_error *error)
3468 {
3469         const struct mlx5_priv *priv = dev->data->dev_private;
3470         int ret;
3471
3472         if (encap && (!encap->size || !encap->data))
3473                 return rte_flow_error_set(error, EINVAL,
3474                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3475                                           "raw encap data cannot be empty");
3476         if (decap && encap) {
3477                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3478                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3479                         /* L3 encap. */
3480                         decap = NULL;
3481                 else if (encap->size <=
3482                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3483                            decap->size >
3484                            MLX5_ENCAPSULATION_DECISION_SIZE)
3485                         /* L3 decap. */
3486                         encap = NULL;
3487                 else if (encap->size >
3488                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3489                            decap->size >
3490                            MLX5_ENCAPSULATION_DECISION_SIZE)
3491                         /* 2 L2 actions: encap and decap. */
3492                         ;
3493                 else
3494                         return rte_flow_error_set(error,
3495                                 ENOTSUP,
3496                                 RTE_FLOW_ERROR_TYPE_ACTION,
3497                                 NULL, "unsupported combination: raw "
3498                                 "decap and raw encap sizes are "
3499                                 "both too small");
3500         }
3501         if (decap) {
3502                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3503                                                     item_flags, attr, error);
3504                 if (ret < 0)
3505                         return ret;
3506                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3507                 ++(*actions_n);
3508         }
3509         if (encap) {
3510                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3511                         return rte_flow_error_set(error, ENOTSUP,
3512                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3513                                                   NULL,
3514                                                   "small raw encap size");
3515                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3516                         return rte_flow_error_set(error, EINVAL,
3517                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3518                                                   NULL,
3519                                                   "more than one encap action");
3520                 if (!attr->transfer && priv->representor)
3521                         return rte_flow_error_set
3522                                         (error, ENOTSUP,
3523                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3524                                          "encap action for VF representor "
3525                                          "not supported on NIC table");
3526                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3527                 ++(*actions_n);
3528         }
3529         return 0;
3530 }
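
/*
 * Usage sketch (application side, illustrative only): an L3 tunnel
 * encap expressed as the raw decap + raw encap pair classified above.
 * The decap strips only the L2 header (size at or below
 * MLX5_ENCAPSULATION_DECISION_SIZE) while the encap buffer carries
 * the complete new headers; the buffer contents and the sizes here
 * are placeholders chosen for illustration.
 *
 *	uint8_t l3_encap_hdr[64];
 *	const struct rte_flow_action_raw_decap decap = {
 *		.data = NULL,
 *		.size = 14,
 *	};
 *	const struct rte_flow_action_raw_encap encap = {
 *		.data = l3_encap_hdr,
 *		.size = sizeof(l3_encap_hdr),
 *	};
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */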
3531
3532 /**
3533  * Validate the ASO CT action.
3534  *
3535  * @param[in] dev
3536  *   Pointer to the rte_eth_dev structure.
3537  * @param[in] action_flags
3538  *   Holds the actions detected until now.
3539  * @param[in] item_flags
3540  *   The items found in this flow rule.
3541  * @param[in] attr
3542  *   Pointer to flow attributes.
3543  * @param[out] error
3544  *   Pointer to error structure.
3545  *
3546  * @return
3547  *   0 on success, a negative errno value otherwise and rte_errno is set.
3548  */
3549 static int
3550 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3551                                uint64_t action_flags,
3552                                uint64_t item_flags,
3553                                const struct rte_flow_attr *attr,
3554                                struct rte_flow_error *error)
3555 {
3556         RTE_SET_USED(dev);
3557
3558         if (attr->group == 0 && !attr->transfer)
3559                 return rte_flow_error_set(error, ENOTSUP,
3560                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3561                                           NULL,
3562                                           "Only support non-root table");
3563         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3564                 return rte_flow_error_set(error, ENOTSUP,
3565                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3566                                           "CT cannot follow a fate action");
3567         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3568             (action_flags & MLX5_FLOW_ACTION_AGE))
3569                 return rte_flow_error_set(error, EINVAL,
3570                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3571                                           "Only one ASO action is supported");
3572         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3573                 return rte_flow_error_set(error, EINVAL,
3574                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3575                                           "Encap cannot exist before CT");
3576         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3577                 return rte_flow_error_set(error, EINVAL,
3578                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3579                                           "Not an outer TCP packet");
3580         return 0;
3581 }
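
/*
 * Usage sketch (application side, illustrative only): the CT action
 * is valid only on non-root tables with an outer TCP match and before
 * any fate action. Applications normally take the conf from a
 * pre-created CT context; the minimal inline conf below is an
 * assumption of this sketch.
 *
 *	const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action_conntrack ct = {
 *		.enable = 1,
 *		.is_original_dir = 1,
 *	};
 *	const struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_CONNTRACK, .conf = &ct },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */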
3582
3583 /**
3584  * Match encap_decap resource.
3585  *
3586  * @param list
3587  *   Pointer to the hash list.
3588  * @param entry
3589  *   Pointer to the existing resource entry object.
3590  * @param key
3591  *   Key of the new entry.
3592  * @param cb_ctx
3593  *   Pointer to the new encap_decap resource context.
3594  *
3595  * @return
3596  *   0 on matching, non-zero otherwise.
3597  */
3598 int
3599 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3600                              struct mlx5_hlist_entry *entry,
3601                              uint64_t key __rte_unused, void *cb_ctx)
3602 {
3603         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3604         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3605         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3606
3607         cache_resource = container_of(entry,
3608                                       struct mlx5_flow_dv_encap_decap_resource,
3609                                       entry);
3610         if (resource->reformat_type == cache_resource->reformat_type &&
3611             resource->ft_type == cache_resource->ft_type &&
3612             resource->flags == cache_resource->flags &&
3613             resource->size == cache_resource->size &&
3614             !memcmp((const void *)resource->buf,
3615                     (const void *)cache_resource->buf,
3616                     resource->size))
3617                 return 0;
3618         return -1;
3619 }
3620
3621 /**
3622  * Allocate encap_decap resource.
3623  *
3624  * @param list
3625  *   Pointer to the hash list.
3626  * @param key
3627  *   Key of the new entry.
3628  * @param cb_ctx
3629  *   Pointer to the new encap_decap resource context.
3630  *
3631  * @return
3632  *   Pointer to the new entry on success, NULL otherwise and rte_errno is set.
3633  */
3634 struct mlx5_hlist_entry *
3635 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3636                               uint64_t key __rte_unused,
3637                               void *cb_ctx)
3638 {
3639         struct mlx5_dev_ctx_shared *sh = list->ctx;
3640         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3641         struct mlx5dv_dr_domain *domain;
3642         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
3643         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3644         uint32_t idx;
3645         int ret;
3646
3647         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3648                 domain = sh->fdb_domain;
3649         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3650                 domain = sh->rx_domain;
3651         else
3652                 domain = sh->tx_domain;
3653         /* Register new encap/decap resource. */
3654         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3655                                        &idx);
3656         if (!cache_resource) {
3657                 rte_flow_error_set(ctx->error, ENOMEM,
3658                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3659                                    "cannot allocate resource memory");
3660                 return NULL;
3661         }
3662         *cache_resource = *resource;
3663         cache_resource->idx = idx;
3664         ret = mlx5_flow_os_create_flow_action_packet_reformat
3665                                         (sh->ctx, domain, cache_resource,
3666                                          &cache_resource->action);
3667         if (ret) {
3668                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3669                 rte_flow_error_set(ctx->error, ENOMEM,
3670                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3671                                    NULL, "cannot create action");
3672                 return NULL;
3673         }
3674
3675         return &cache_resource->entry;
3676 }
3677
3678 /**
3679  * Find existing encap/decap resource or create and register a new one.
3680  *
3681  * @param[in, out] dev
3682  *   Pointer to rte_eth_dev structure.
3683  * @param[in, out] resource
3684  *   Pointer to encap/decap resource.
3685  * @param[in, out] dev_flow
3686  *   Pointer to the dev_flow.
3687  * @param[out] error
3688  *   Pointer to error structure.
3689  *
3690  * @return
3691  *   0 on success, a negative errno value otherwise and rte_errno is set.
3692  */
3693 static int
3694 flow_dv_encap_decap_resource_register
3695                         (struct rte_eth_dev *dev,
3696                          struct mlx5_flow_dv_encap_decap_resource *resource,
3697                          struct mlx5_flow *dev_flow,
3698                          struct rte_flow_error *error)
3699 {
3700         struct mlx5_priv *priv = dev->data->dev_private;
3701         struct mlx5_dev_ctx_shared *sh = priv->sh;
3702         struct mlx5_hlist_entry *entry;
3703         union {
3704                 struct {
3705                         uint32_t ft_type:8;
3706                         uint32_t refmt_type:8;
3707                         /*
3708                          * Header reformat actions can be shared between
3709                          * non-root tables. One bit indicates whether the
3710                          * rule is created on a non-root table.
3711                          */
3712                         uint32_t is_non_root:1;
3713                         uint32_t reserve:15;
3714                 };
3715                 uint32_t v32;
3716         } encap_decap_key = {
3717                 {
3718                         .ft_type = resource->ft_type,
3719                         .refmt_type = resource->reformat_type,
3720                         .is_non_root = !!dev_flow->dv.group,
3721                         .reserve = 0,
3722                 }
3723         };
3724         struct mlx5_flow_cb_ctx ctx = {
3725                 .error = error,
3726                 .data = resource,
3727         };
3728         uint64_t key64;
3729
3730         resource->flags = dev_flow->dv.group ? 0 : 1; /* Root if group 0. */
3731         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3732                                  sizeof(encap_decap_key.v32), 0);
3733         if (resource->reformat_type !=
3734             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3735             resource->size)
3736                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3737         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3738         if (!entry)
3739                 return -rte_errno;
3740         resource = container_of(entry, typeof(*resource), entry);
3741         dev_flow->dv.encap_decap = resource;
3742         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3743         return 0;
3744 }
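
/*
 * Editorial sketch (hypothetical helper, not part of the driver; the
 * MLX5_EDITOR_EXAMPLES guard is invented): the hash-list key above,
 * written out without bit-fields. Assuming the little-endian bit-field
 * layout, the 8-bit table type, 8-bit reformat type and the non-root bit
 * pack into one 32-bit word that seeds a raw checksum; the reformat
 * buffer (absent only for plain L2 decap) is folded into the same
 * checksum, so identical headers on the same table type hash alike.
 */
#ifdef MLX5_EDITOR_EXAMPLES
static uint64_t
example_encap_decap_key(uint8_t ft_type, uint8_t refmt_type,
                        unsigned int non_root,
                        const uint8_t *buf, size_t size)
{
        uint32_t v32 = (uint32_t)ft_type | ((uint32_t)refmt_type << 8) |
                       ((uint32_t)!!non_root << 16);
        /* Seed the key with the packed descriptor word. */
        uint64_t key64 = __rte_raw_cksum(&v32, sizeof(v32), 0);

        if (size)
                key64 = __rte_raw_cksum(buf, size, key64);
        return key64;
}
#endif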
3745
3746 /**
3747  * Find existing table jump resource or create and register a new one.
3748  *
3749  * @param[in, out] dev
3750  *   Pointer to rte_eth_dev structure.
3751  * @param[in, out] tbl
3752  *   Pointer to flow table resource.
3753  * @param[in, out] dev_flow
3754  *   Pointer to the dev_flow.
3755  * @param[out] error
3756  *   Pointer to error structure.
3757  *
3758  * @return
3759  *   0 on success, a negative errno value otherwise and rte_errno is set.
3760  */
3761 static int
3762 flow_dv_jump_tbl_resource_register
3763                         (struct rte_eth_dev *dev __rte_unused,
3764                          struct mlx5_flow_tbl_resource *tbl,
3765                          struct mlx5_flow *dev_flow,
3766                          struct rte_flow_error *error __rte_unused)
3767 {
3768         struct mlx5_flow_tbl_data_entry *tbl_data =
3769                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3770
3771         MLX5_ASSERT(tbl);
3772         MLX5_ASSERT(tbl_data->jump.action);
3773         dev_flow->handle->rix_jump = tbl_data->idx;
3774         dev_flow->dv.jump = &tbl_data->jump;
3775         return 0;
3776 }
3777
3778 int
3779 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3780                          struct mlx5_cache_entry *entry, void *cb_ctx)
3781 {
3782         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3783         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3784         struct mlx5_flow_dv_port_id_action_resource *res =
3785                         container_of(entry, typeof(*res), entry);
3786
3787         return ref->port_id != res->port_id;
3788 }
3789
3790 struct mlx5_cache_entry *
3791 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3792                           struct mlx5_cache_entry *entry __rte_unused,
3793                           void *cb_ctx)
3794 {
3795         struct mlx5_dev_ctx_shared *sh = list->ctx;
3796         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3797         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3798         struct mlx5_flow_dv_port_id_action_resource *cache;
3799         uint32_t idx;
3800         int ret;
3801
3802         /* Register new port id action resource. */
3803         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3804         if (!cache) {
3805                 rte_flow_error_set(ctx->error, ENOMEM,
3806                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3807                                    "cannot allocate port_id action cache memory");
3808                 return NULL;
3809         }
3810         *cache = *ref;
3811         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3812                                                         ref->port_id,
3813                                                         &cache->action);
3814         if (ret) {
3815                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3816                 rte_flow_error_set(ctx->error, ENOMEM,
3817                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3818                                    "cannot create action");
3819                 return NULL;
3820         }
3821         cache->idx = idx;
3822         return &cache->entry;
3823 }
3824
3825 /**
3826  * Find existing table port ID resource or create and register a new one.
3827  *
3828  * @param[in, out] dev
3829  *   Pointer to rte_eth_dev structure.
3830  * @param[in, out] resource
3831  *   Pointer to port ID action resource.
3832  * @param[in, out] dev_flow
3833  *   Pointer to the dev_flow.
3834  * @param[out] error
3835  *   Pointer to error structure.
3836  *
3837  * @return
3838  *   0 on success, a negative errno value otherwise and rte_errno is set.
3839  */
3840 static int
3841 flow_dv_port_id_action_resource_register
3842                         (struct rte_eth_dev *dev,
3843                          struct mlx5_flow_dv_port_id_action_resource *resource,
3844                          struct mlx5_flow *dev_flow,
3845                          struct rte_flow_error *error)
3846 {
3847         struct mlx5_priv *priv = dev->data->dev_private;
3848         struct mlx5_cache_entry *entry;
3849         struct mlx5_flow_dv_port_id_action_resource *cache;
3850         struct mlx5_flow_cb_ctx ctx = {
3851                 .error = error,
3852                 .data = resource,
3853         };
3854
3855         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3856         if (!entry)
3857                 return -rte_errno;
3858         cache = container_of(entry, typeof(*cache), entry);
3859         dev_flow->dv.port_id_action = cache;
3860         dev_flow->handle->rix_port_id_action = cache->idx;
3861         return 0;
3862 }
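
/*
 * Editorial note: mlx5_cache_register() walks the list with the match
 * callback above and invokes the create callback only on a miss, so all
 * flows targeting the same port_id share a single destination action.
 */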
3863
3864 int
3865 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3866                          struct mlx5_cache_entry *entry, void *cb_ctx)
3867 {
3868         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3869         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3870         struct mlx5_flow_dv_push_vlan_action_resource *res =
3871                         container_of(entry, typeof(*res), entry);
3872
3873         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3874 }
3875
3876 struct mlx5_cache_entry *
3877 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3878                           struct mlx5_cache_entry *entry __rte_unused,
3879                           void *cb_ctx)
3880 {
3881         struct mlx5_dev_ctx_shared *sh = list->ctx;
3882         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3883         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3884         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3885         struct mlx5dv_dr_domain *domain;
3886         uint32_t idx;
3887         int ret;
3888
3889         /* Register new push VLAN action resource. */
3890         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3891         if (!cache) {
3892                 rte_flow_error_set(ctx->error, ENOMEM,
3893                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3894                                    "cannot allocate push_vlan action cache memory");
3895                 return NULL;
3896         }
3897         *cache = *ref;
3898         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3899                 domain = sh->fdb_domain;
3900         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3901                 domain = sh->rx_domain;
3902         else
3903                 domain = sh->tx_domain;
3904         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3905                                                         &cache->action);
3906         if (ret) {
3907                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3908                 rte_flow_error_set(ctx->error, ENOMEM,
3909                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3910                                    "cannot create push vlan action");
3911                 return NULL;
3912         }
3913         cache->idx = idx;
3914         return &cache->entry;
3915 }
3916
3917 /**
3918  * Find existing push vlan resource or create and register a new one.
3919  *
3920  * @param[in, out] dev
3921  *   Pointer to rte_eth_dev structure.
3922  * @param[in, out] resource
3923  *   Pointer to push VLAN action resource.
3924  * @param[in, out] dev_flow
3925  *   Pointer to the dev_flow.
3926  * @param[out] error
3927  *   Pointer to error structure.
3928  *
3929  * @return
3930  *   0 on success, a negative errno value otherwise and rte_errno is set.
3931  */
3932 static int
3933 flow_dv_push_vlan_action_resource_register
3934                        (struct rte_eth_dev *dev,
3935                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3936                         struct mlx5_flow *dev_flow,
3937                         struct rte_flow_error *error)
3938 {
3939         struct mlx5_priv *priv = dev->data->dev_private;
3940         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3941         struct mlx5_cache_entry *entry;
3942         struct mlx5_flow_cb_ctx ctx = {
3943                 .error = error,
3944                 .data = resource,
3945         };
3946
3947         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3948         if (!entry)
3949                 return -rte_errno;
3950         cache = container_of(entry, typeof(*cache), entry);
3951
3952         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3953         dev_flow->dv.push_vlan_res = cache;
3954         return 0;
3955 }
3956
3957 /**
3958  * Get the header size of the specified rte_flow_item_type.
3959  *
3960  * @param[in] item_type
3961  *   Tested rte_flow_item_type.
3962  *
3963  * @return
3964  *   Size of the item type's header structure, 0 if void or irrelevant.
3965  */
3966 static size_t
3967 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3968 {
3969         size_t retval;
3970
3971         switch (item_type) {
3972         case RTE_FLOW_ITEM_TYPE_ETH:
3973                 retval = sizeof(struct rte_ether_hdr);
3974                 break;
3975         case RTE_FLOW_ITEM_TYPE_VLAN:
3976                 retval = sizeof(struct rte_vlan_hdr);
3977                 break;
3978         case RTE_FLOW_ITEM_TYPE_IPV4:
3979                 retval = sizeof(struct rte_ipv4_hdr);
3980                 break;
3981         case RTE_FLOW_ITEM_TYPE_IPV6:
3982                 retval = sizeof(struct rte_ipv6_hdr);
3983                 break;
3984         case RTE_FLOW_ITEM_TYPE_UDP:
3985                 retval = sizeof(struct rte_udp_hdr);
3986                 break;
3987         case RTE_FLOW_ITEM_TYPE_TCP:
3988                 retval = sizeof(struct rte_tcp_hdr);
3989                 break;
3990         case RTE_FLOW_ITEM_TYPE_VXLAN:
3991         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3992                 retval = sizeof(struct rte_vxlan_hdr);
3993                 break;
3994         case RTE_FLOW_ITEM_TYPE_GRE:
3995         case RTE_FLOW_ITEM_TYPE_NVGRE:
3996                 retval = sizeof(struct rte_gre_hdr);
3997                 break;
3998         case RTE_FLOW_ITEM_TYPE_MPLS:
3999                 retval = sizeof(struct rte_mpls_hdr);
4000                 break;
4001         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4002         default:
4003                 retval = 0;
4004                 break;
4005         }
4006         return retval;
4007 }
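
/*
 * Editorial sketch (hypothetical helper, not part of the driver; the
 * MLX5_EDITOR_EXAMPLES guard is invented): summing the per-item header
 * sizes above gives the raw encapsulation header length.
 */
#ifdef MLX5_EDITOR_EXAMPLES
static size_t
example_vxlan_encap_hdr_len(void)
{
        static const enum rte_flow_item_type chain[] = {
                RTE_FLOW_ITEM_TYPE_ETH,
                RTE_FLOW_ITEM_TYPE_IPV4,
                RTE_FLOW_ITEM_TYPE_UDP,
                RTE_FLOW_ITEM_TYPE_VXLAN,
        };
        size_t i, len = 0;

        for (i = 0; i < RTE_DIM(chain); i++)
                len += flow_dv_get_item_hdr_len(chain[i]);
        return len; /* 14 + 20 + 8 + 8 = 50 bytes for classic VXLAN. */
}
#endif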
4008
4009 #define MLX5_ENCAP_IPV4_VERSION         0x40
4010 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4011 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4012 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4013 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4014 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4015 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4016
4017 /**
4018  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
4019  *
4020  * @param[in] items
4021  *   Pointer to rte_flow_item objects list.
4022  * @param[out] buf
4023  *   Pointer to the output buffer.
4024  * @param[out] size
4025  *   Pointer to the output buffer size.
4026  * @param[out] error
4027  *   Pointer to the error structure.
4028  *
4029  * @return
4030  *   0 on success, a negative errno value otherwise and rte_errno is set.
4031  */
4032 static int
4033 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4034                            size_t *size, struct rte_flow_error *error)
4035 {
4036         struct rte_ether_hdr *eth = NULL;
4037         struct rte_vlan_hdr *vlan = NULL;
4038         struct rte_ipv4_hdr *ipv4 = NULL;
4039         struct rte_ipv6_hdr *ipv6 = NULL;
4040         struct rte_udp_hdr *udp = NULL;
4041         struct rte_vxlan_hdr *vxlan = NULL;
4042         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4043         struct rte_gre_hdr *gre = NULL;
4044         size_t len;
4045         size_t temp_size = 0;
4046
4047         if (!items)
4048                 return rte_flow_error_set(error, EINVAL,
4049                                           RTE_FLOW_ERROR_TYPE_ACTION,
4050                                           NULL, "invalid empty data");
4051         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4052                 len = flow_dv_get_item_hdr_len(items->type);
4053                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4054                         return rte_flow_error_set(error, EINVAL,
4055                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4056                                                   (void *)items->type,
4057                                                   "items total size is too big"
4058                                                   " for encap action");
4059                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4060                 switch (items->type) {
4061                 case RTE_FLOW_ITEM_TYPE_ETH:
4062                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4063                         break;
4064                 case RTE_FLOW_ITEM_TYPE_VLAN:
4065                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4066                         if (!eth)
4067                                 return rte_flow_error_set(error, EINVAL,
4068                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4069                                                 (void *)items->type,
4070                                                 "eth header not found");
4071                         if (!eth->ether_type)
4072                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4073                         break;
4074                 case RTE_FLOW_ITEM_TYPE_IPV4:
4075                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4076                         if (!vlan && !eth)
4077                                 return rte_flow_error_set(error, EINVAL,
4078                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4079                                                 (void *)items->type,
4080                                                 "neither eth nor vlan"
4081                                                 " header found");
4082                         if (vlan && !vlan->eth_proto)
4083                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4084                         else if (eth && !eth->ether_type)
4085                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4086                         if (!ipv4->version_ihl)
4087                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4088                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4089                         if (!ipv4->time_to_live)
4090                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4091                         break;
4092                 case RTE_FLOW_ITEM_TYPE_IPV6:
4093                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4094                         if (!vlan && !eth)
4095                                 return rte_flow_error_set(error, EINVAL,
4096                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4097                                                 (void *)items->type,
4098                                                 "neither eth nor vlan"
4099                                                 " header found");
4100                         if (vlan && !vlan->eth_proto)
4101                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4102                         else if (eth && !eth->ether_type)
4103                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4104                         if (!ipv6->vtc_flow)
4105                                 ipv6->vtc_flow =
4106                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4107                         if (!ipv6->hop_limits)
4108                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4109                         break;
4110                 case RTE_FLOW_ITEM_TYPE_UDP:
4111                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4112                         if (!ipv4 && !ipv6)
4113                                 return rte_flow_error_set(error, EINVAL,
4114                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4115                                                 (void *)items->type,
4116                                                 "ip header not found");
4117                         if (ipv4 && !ipv4->next_proto_id)
4118                                 ipv4->next_proto_id = IPPROTO_UDP;
4119                         else if (ipv6 && !ipv6->proto)
4120                                 ipv6->proto = IPPROTO_UDP;
4121                         break;
4122                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4123                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4124                         if (!udp)
4125                                 return rte_flow_error_set(error, EINVAL,
4126                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4127                                                 (void *)items->type,
4128                                                 "udp header not found");
4129                         if (!udp->dst_port)
4130                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4131                         if (!vxlan->vx_flags)
4132                                 vxlan->vx_flags =
4133                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4134                         break;
4135                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4136                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4137                         if (!udp)
4138                                 return rte_flow_error_set(error, EINVAL,
4139                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4140                                                 (void *)items->type,
4141                                                 "udp header not found");
4142                         if (!vxlan_gpe->proto)
4143                                 return rte_flow_error_set(error, EINVAL,
4144                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4145                                                 (void *)items->type,
4146                                                 "next protocol not found");
4147                         if (!udp->dst_port)
4148                                 udp->dst_port =
4149                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4150                         if (!vxlan_gpe->vx_flags)
4151                                 vxlan_gpe->vx_flags =
4152                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4153                         break;
4154                 case RTE_FLOW_ITEM_TYPE_GRE:
4155                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4156                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4157                         if (!gre->proto)
4158                                 return rte_flow_error_set(error, EINVAL,
4159                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4160                                                 (void *)items->type,
4161                                                 "next protocol not found");
4162                         if (!ipv4 && !ipv6)
4163                                 return rte_flow_error_set(error, EINVAL,
4164                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4165                                                 (void *)items->type,
4166                                                 "ip header not found");
4167                         if (ipv4 && !ipv4->next_proto_id)
4168                                 ipv4->next_proto_id = IPPROTO_GRE;
4169                         else if (ipv6 && !ipv6->proto)
4170                                 ipv6->proto = IPPROTO_GRE;
4171                         break;
4172                 case RTE_FLOW_ITEM_TYPE_VOID:
4173                         break;
4174                 default:
4175                         return rte_flow_error_set(error, EINVAL,
4176                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4177                                                   (void *)items->type,
4178                                                   "unsupported item type");
4179                         break;
4180                 }
4181                 temp_size += len;
4182         }
4183         *size = temp_size;
4184         return 0;
4185 }
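
/*
 * Editorial sketch (hypothetical usage, not part of the driver; the
 * MLX5_EDITOR_EXAMPLES guard and all addresses are invented): a VXLAN
 * item chain fed to flow_dv_convert_encap_data(). Fields left at zero
 * (ether_type, next_proto_id, dst_port, vx_flags, TTL) are filled in
 * with the MLX5_ENCAP_* defaults above; buf must provide at least
 * MLX5_ENCAP_MAX_LEN bytes.
 */
#ifdef MLX5_EDITOR_EXAMPLES
static int
example_vxlan_encap_data(uint8_t *buf, size_t *size,
                         struct rte_flow_error *error)
{
        static const struct rte_flow_item_eth eth = {
                .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
                .src.addr_bytes = "\x00\xaa\xbb\xcc\xdd\xee",
        };
        static const struct rte_flow_item_ipv4 ipv4 = {
                .hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
                .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
        };
        static const struct rte_flow_item_udp udp; /* Default VXLAN port. */
        static const struct rte_flow_item_vxlan vxlan = {
                .vni = "\x00\x00\x2a", /* VNI 42. */
        };
        const struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
                { .type = RTE_FLOW_ITEM_TYPE_END, },
        };

        return flow_dv_convert_encap_data(items, buf, size, error);
}
#endif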
4186
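/**
 * Zero the UDP checksum in a raw encapsulation header whose outer headers
 * are IPv6/UDP, skipping any VLAN/QinQ tags on the way.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation header buffer.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */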
4187 static int
4188 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4189 {
4190         struct rte_ether_hdr *eth = NULL;
4191         struct rte_vlan_hdr *vlan = NULL;
4192         struct rte_ipv6_hdr *ipv6 = NULL;
4193         struct rte_udp_hdr *udp = NULL;
4194         char *next_hdr;
4195         uint16_t proto;
4196
4197         eth = (struct rte_ether_hdr *)data;
4198         next_hdr = (char *)(eth + 1);
4199         proto = rte_be_to_cpu_16(eth->ether_type);
4200
4201         /* VLAN skipping */
4202         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4203                 vlan = (struct rte_vlan_hdr *)next_hdr;
4204                 proto = rte_be_to_cpu_16(vlan->eth_proto);
4205                 next_hdr += sizeof(struct rte_vlan_hdr);
4206         }
4207
4208         /* HW calculates the IPv4 checksum; no need to proceed. */
4209         if (proto == RTE_ETHER_TYPE_IPV4)
4210                 return 0;
4211
4212         /* Non-IPv4/IPv6 headers are not supported. */
4213         if (proto != RTE_ETHER_TYPE_IPV6) {
4214                 return rte_flow_error_set(error, ENOTSUP,
4215                                           RTE_FLOW_ERROR_TYPE_ACTION,
4216                                           NULL, "Cannot offload non IPv4/IPv6");
4217         }
4218
4219         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4220
4221         /* Ignore non-UDP traffic. */
4222         if (ipv6->proto != IPPROTO_UDP)
4223                 return 0;
4224
4225         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4226         udp->dgram_cksum = 0;
4227
4228         return 0;
4229 }
4230
4231 /**
4232  * Convert L2 encap action to DV specification.
4233  *
4234  * @param[in] dev
4235  *   Pointer to rte_eth_dev structure.
4236  * @param[in] action
4237  *   Pointer to action structure.
4238  * @param[in, out] dev_flow
4239  *   Pointer to the mlx5_flow.
4240  * @param[in] transfer
4241  *   Mark if the flow is E-Switch flow.
4242  * @param[out] error
4243  *   Pointer to the error structure.
4244  *
4245  * @return
4246  *   0 on success, a negative errno value otherwise and rte_errno is set.
4247  */
4248 static int
4249 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4250                                const struct rte_flow_action *action,
4251                                struct mlx5_flow *dev_flow,
4252                                uint8_t transfer,
4253                                struct rte_flow_error *error)
4254 {
4255         const struct rte_flow_item *encap_data;
4256         const struct rte_flow_action_raw_encap *raw_encap_data;
4257         struct mlx5_flow_dv_encap_decap_resource res = {
4258                 .reformat_type =
4259                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4260                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4261                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4262         };
4263
4264         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4265                 raw_encap_data =
4266                         (const struct rte_flow_action_raw_encap *)action->conf;
4267                 res.size = raw_encap_data->size;
4268                 memcpy(res.buf, raw_encap_data->data, res.size);
4269         } else {
4270                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4271                         encap_data =
4272                                 ((const struct rte_flow_action_vxlan_encap *)
4273                                                 action->conf)->definition;
4274                 else
4275                         encap_data =
4276                                 ((const struct rte_flow_action_nvgre_encap *)
4277                                                 action->conf)->definition;
4278                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4279                                                &res.size, error))
4280                         return -rte_errno;
4281         }
4282         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4283                 return -rte_errno;
4284         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4285                 return rte_flow_error_set(error, EINVAL,
4286                                           RTE_FLOW_ERROR_TYPE_ACTION,
4287                                           NULL, "can't create L2 encap action");
4288         return 0;
4289 }
4290
4291 /**
4292  * Convert L2 decap action to DV specification.
4293  *
4294  * @param[in] dev
4295  *   Pointer to rte_eth_dev structure.
4296  * @param[in, out] dev_flow
4297  *   Pointer to the mlx5_flow.
4298  * @param[in] transfer
4299  *   Mark if the flow is E-Switch flow.
4300  * @param[out] error
4301  *   Pointer to the error structure.
4302  *
4303  * @return
4304  *   0 on success, a negative errno value otherwise and rte_errno is set.
4305  */
4306 static int
4307 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4308                                struct mlx5_flow *dev_flow,
4309                                uint8_t transfer,
4310                                struct rte_flow_error *error)
4311 {
4312         struct mlx5_flow_dv_encap_decap_resource res = {
4313                 .size = 0,
4314                 .reformat_type =
4315                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4316                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4317                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4318         };
4319
4320         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4321                 return rte_flow_error_set(error, EINVAL,
4322                                           RTE_FLOW_ERROR_TYPE_ACTION,
4323                                           NULL, "can't create L2 decap action");
4324         return 0;
4325 }
4326
4327 /**
4328  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4329  *
4330  * @param[in] dev
4331  *   Pointer to rte_eth_dev structure.
4332  * @param[in] action
4333  *   Pointer to action structure.
4334  * @param[in, out] dev_flow
4335  *   Pointer to the mlx5_flow.
4336  * @param[in] attr
4337  *   Pointer to the flow attributes.
4338  * @param[out] error
4339  *   Pointer to the error structure.
4340  *
4341  * @return
4342  *   0 on success, a negative errno value otherwise and rte_errno is set.
4343  */
4344 static int
4345 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4346                                 const struct rte_flow_action *action,
4347                                 struct mlx5_flow *dev_flow,
4348                                 const struct rte_flow_attr *attr,
4349                                 struct rte_flow_error *error)
4350 {
4351         const struct rte_flow_action_raw_encap *encap_data;
4352         struct mlx5_flow_dv_encap_decap_resource res;
4353
4354         memset(&res, 0, sizeof(res));
4355         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4356         res.size = encap_data->size;
4357         memcpy(res.buf, encap_data->data, res.size);
4358         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4359                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4360                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4361         if (attr->transfer)
4362                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4363         else
4364                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4365                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4366         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4367                 return rte_flow_error_set(error, EINVAL,
4368                                           RTE_FLOW_ERROR_TYPE_ACTION,
4369                                           NULL, "can't create encap action");
4370         return 0;
4371 }
4372
4373 /**
4374  * Create action push VLAN.
4375  *
4376  * @param[in] dev
4377  *   Pointer to rte_eth_dev structure.
4378  * @param[in] attr
4379  *   Pointer to the flow attributes.
4380  * @param[in] vlan
4381  *   Pointer to the vlan to push to the Ethernet header.
4382  * @param[in, out] dev_flow
4383  *   Pointer to the mlx5_flow.
4384  * @param[out] error
4385  *   Pointer to the error structure.
4386  *
4387  * @return
4388  *   0 on success, a negative errno value otherwise and rte_errno is set.
4389  */
4390 static int
4391 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4392                                 const struct rte_flow_attr *attr,
4393                                 const struct rte_vlan_hdr *vlan,
4394                                 struct mlx5_flow *dev_flow,
4395                                 struct rte_flow_error *error)
4396 {
4397         struct mlx5_flow_dv_push_vlan_action_resource res;
4398
4399         memset(&res, 0, sizeof(res));
4400         res.vlan_tag =
4401                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4402                                  vlan->vlan_tci);
4403         if (attr->transfer)
4404                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4405         else
4406                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4407                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4408         return flow_dv_push_vlan_action_resource_register
4409                                             (dev, &res, dev_flow, error);
4410 }
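
/*
 * Editorial note: vlan_tag is the big-endian concatenation of the TPID and
 * the TCI. E.g. TPID 0x8100 with PCP 5 and VID 100 gives TCI
 * (5 << 13) | 100 = 0xa064, hence vlan_tag = rte_cpu_to_be_32(0x8100a064).
 */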
4411
4412 /**
4413  * Validate the modify-header actions.
4414  *
4415  * @param[in] action_flags
4416  *   Holds the actions detected until now.
4417  * @param[in] action
4418  *   Pointer to the modify action.
4419  * @param[out] error
4420  *   Pointer to error structure.
4421  *
4422  * @return
4423  *   0 on success, a negative errno value otherwise and rte_errno is set.
4424  */
4425 static int
4426 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4427                                    const struct rte_flow_action *action,
4428                                    struct rte_flow_error *error)
4429 {
4430         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4431                 return rte_flow_error_set(error, EINVAL,
4432                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4433                                           NULL, "action configuration not set");
4434         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4435                 return rte_flow_error_set(error, EINVAL,
4436                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4437                                           "can't have encap action before"
4438                                           " modify action");
4439         return 0;
4440 }
4441
4442 /**
4443  * Validate the modify-header MAC address actions.
4444  *
4445  * @param[in] action_flags
4446  *   Holds the actions detected until now.
4447  * @param[in] action
4448  *   Pointer to the modify action.
4449  * @param[in] item_flags
4450  *   Holds the items detected.
4451  * @param[out] error
4452  *   Pointer to error structure.
4453  *
4454  * @return
4455  *   0 on success, a negative errno value otherwise and rte_errno is set.
4456  */
4457 static int
4458 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4459                                    const struct rte_flow_action *action,
4460                                    const uint64_t item_flags,
4461                                    struct rte_flow_error *error)
4462 {
4463         int ret = 0;
4464
4465         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4466         if (!ret) {
4467                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4468                         return rte_flow_error_set(error, EINVAL,
4469                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4470                                                   NULL,
4471                                                   "no L2 item in pattern");
4472         }
4473         return ret;
4474 }
4475
4476 /**
4477  * Validate the modify-header IPv4 address actions.
4478  *
4479  * @param[in] action_flags
4480  *   Holds the actions detected until now.
4481  * @param[in] action
4482  *   Pointer to the modify action.
4483  * @param[in] item_flags
4484  *   Holds the items detected.
4485  * @param[out] error
4486  *   Pointer to error structure.
4487  *
4488  * @return
4489  *   0 on success, a negative errno value otherwise and rte_errno is set.
4490  */
4491 static int
4492 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4493                                     const struct rte_flow_action *action,
4494                                     const uint64_t item_flags,
4495                                     struct rte_flow_error *error)
4496 {
4497         int ret = 0;
4498         uint64_t layer;
4499
4500         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4501         if (!ret) {
4502                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4503                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4504                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4505                 if (!(item_flags & layer))
4506                         return rte_flow_error_set(error, EINVAL,
4507                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4508                                                   NULL,
4509                                                   "no ipv4 item in pattern");
4510         }
4511         return ret;
4512 }
4513
4514 /**
4515  * Validate the modify-header IPv6 address actions.
4516  *
4517  * @param[in] action_flags
4518  *   Holds the actions detected until now.
4519  * @param[in] action
4520  *   Pointer to the modify action.
4521  * @param[in] item_flags
4522  *   Holds the items detected.
4523  * @param[out] error
4524  *   Pointer to error structure.
4525  *
4526  * @return
4527  *   0 on success, a negative errno value otherwise and rte_errno is set.
4528  */
4529 static int
4530 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4531                                     const struct rte_flow_action *action,
4532                                     const uint64_t item_flags,
4533                                     struct rte_flow_error *error)
4534 {
4535         int ret = 0;
4536         uint64_t layer;
4537
4538         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4539         if (!ret) {
4540                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4541                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4542                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4543                 if (!(item_flags & layer))
4544                         return rte_flow_error_set(error, EINVAL,
4545                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4546                                                   NULL,
4547                                                   "no ipv6 item in pattern");
4548         }
4549         return ret;
4550 }
4551
4552 /**
4553  * Validate the modify-header TP actions.
4554  *
4555  * @param[in] action_flags
4556  *   Holds the actions detected until now.
4557  * @param[in] action
4558  *   Pointer to the modify action.
4559  * @param[in] item_flags
4560  *   Holds the items detected.
4561  * @param[out] error
4562  *   Pointer to error structure.
4563  *
4564  * @return
4565  *   0 on success, a negative errno value otherwise and rte_errno is set.
4566  */
4567 static int
4568 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4569                                   const struct rte_flow_action *action,
4570                                   const uint64_t item_flags,
4571                                   struct rte_flow_error *error)
4572 {
4573         int ret = 0;
4574         uint64_t layer;
4575
4576         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4577         if (!ret) {
4578                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4579                                  MLX5_FLOW_LAYER_INNER_L4 :
4580                                  MLX5_FLOW_LAYER_OUTER_L4;
4581                 if (!(item_flags & layer))
4582                         return rte_flow_error_set(error, EINVAL,
4583                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4584                                                   NULL, "no transport layer "
4585                                                   "in pattern");
4586         }
4587         return ret;
4588 }
4589
4590 /**
4591  * Validate the modify-header actions of increment/decrement
4592  * TCP Sequence-number.
4593  *
4594  * @param[in] action_flags
4595  *   Holds the actions detected until now.
4596  * @param[in] action
4597  *   Pointer to the modify action.
4598  * @param[in] item_flags
4599  *   Holds the items detected.
4600  * @param[out] error
4601  *   Pointer to error structure.
4602  *
4603  * @return
4604  *   0 on success, a negative errno value otherwise and rte_errno is set.
4605  */
4606 static int
4607 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4608                                        const struct rte_flow_action *action,
4609                                        const uint64_t item_flags,
4610                                        struct rte_flow_error *error)
4611 {
4612         int ret = 0;
4613         uint64_t layer;
4614
4615         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4616         if (!ret) {
4617                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4618                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4619                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4620                 if (!(item_flags & layer))
4621                         return rte_flow_error_set(error, EINVAL,
4622                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4623                                                   NULL, "no TCP item in"
4624                                                   " pattern");
4625                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4626                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4627                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4628                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4629                         return rte_flow_error_set(error, EINVAL,
4630                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4631                                                   NULL,
4632                                                   "cannot decrease and increase"
4633                                                   " TCP sequence number"
4634                                                   " at the same time");
4635         }
4636         return ret;
4637 }
4638
4639 /**
4640  * Validate the modify-header actions of increment/decrement
4641  * TCP Acknowledgment number.
4642  *
4643  * @param[in] action_flags
4644  *   Holds the actions detected until now.
4645  * @param[in] action
4646  *   Pointer to the modify action.
4647  * @param[in] item_flags
4648  *   Holds the items detected.
4649  * @param[out] error
4650  *   Pointer to error structure.
4651  *
4652  * @return
4653  *   0 on success, a negative errno value otherwise and rte_errno is set.
4654  */
4655 static int
4656 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4657                                        const struct rte_flow_action *action,
4658                                        const uint64_t item_flags,
4659                                        struct rte_flow_error *error)
4660 {
4661         int ret = 0;
4662         uint64_t layer;
4663
4664         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4665         if (!ret) {
4666                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4667                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4668                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4669                 if (!(item_flags & layer))
4670                         return rte_flow_error_set(error, EINVAL,
4671                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4672                                                   NULL, "no TCP item in"
4673                                                   " pattern");
4674                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4675                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4676                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4677                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4678                         return rte_flow_error_set(error, EINVAL,
4679                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4680                                                   NULL,
4681                                                   "cannot decrease and increase"
4682                                                   " TCP acknowledgment number"
4683                                                   " at the same time");
4684         }
4685         return ret;
4686 }
4687
4688 /**
4689  * Validate the modify-header TTL actions.
4690  *
4691  * @param[in] action_flags
4692  *   Holds the actions detected until now.
4693  * @param[in] action
4694  *   Pointer to the modify action.
4695  * @param[in] item_flags
4696  *   Holds the items detected.
4697  * @param[out] error
4698  *   Pointer to error structure.
4699  *
4700  * @return
4701  *   0 on success, a negative errno value otherwise and rte_errno is set.
4702  */
4703 static int
4704 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4705                                    const struct rte_flow_action *action,
4706                                    const uint64_t item_flags,
4707                                    struct rte_flow_error *error)
4708 {
4709         int ret = 0;
4710         uint64_t layer;
4711
4712         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4713         if (!ret) {
4714                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4715                                  MLX5_FLOW_LAYER_INNER_L3 :
4716                                  MLX5_FLOW_LAYER_OUTER_L3;
4717                 if (!(item_flags & layer))
4718                         return rte_flow_error_set(error, EINVAL,
4719                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4720                                                   NULL,
4721                                                   "no IP protocol in pattern");
4722         }
4723         return ret;
4724 }
4725
4726 /**
4727  * Validate the generic modify field actions.
4728  * @param[in] dev
4729  *   Pointer to the rte_eth_dev structure.
4730  * @param[in] action_flags
4731  *   Holds the actions detected until now.
4732  * @param[in] action
4733  *   Pointer to the modify action.
4734  * @param[in] attr
4735  *   Pointer to the flow attributes.
4736  * @param[out] error
4737  *   Pointer to error structure.
4738  *
4739  * @return
4740  *   Number of header fields to modify (0 or more) on success,
4741  *   a negative errno value otherwise and rte_errno is set.
4742  */
4743 static int
4744 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4745                                    const uint64_t action_flags,
4746                                    const struct rte_flow_action *action,
4747                                    const struct rte_flow_attr *attr,
4748                                    struct rte_flow_error *error)
4749 {
4750         int ret = 0;
4751         struct mlx5_priv *priv = dev->data->dev_private;
4752         struct mlx5_dev_config *config = &priv->config;
4753         const struct rte_flow_action_modify_field *action_modify_field =
4754                 action->conf;
4755         uint32_t dst_width = mlx5_flow_item_field_width(config,
4756                                 action_modify_field->dst.field);
4757         uint32_t src_width = mlx5_flow_item_field_width(config,
4758                                 action_modify_field->src.field);
4759
4760         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4761         if (ret)
4762                 return ret;
4763
4764         if (action_modify_field->width == 0)
4765                 return rte_flow_error_set(error, EINVAL,
4766                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4767                                 "no bits are requested to be modified");
4768         else if (action_modify_field->width > dst_width ||
4769                  action_modify_field->width > src_width)
4770                 return rte_flow_error_set(error, EINVAL,
4771                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4772                                 "cannot modify more bits than"
4773                                 " the width of a field");
4774         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4775             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4776                 if ((action_modify_field->dst.offset +
4777                      action_modify_field->width > dst_width) ||
4778                     (action_modify_field->dst.offset % 32))
4779                         return rte_flow_error_set(error, EINVAL,
4780                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4781                                         "destination offset is too big"
4782                                         " or not aligned to 4 bytes");
4783                 if (action_modify_field->dst.level &&
4784                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4785                         return rte_flow_error_set(error, ENOTSUP,
4786                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4787                                         "inner header fields modification"
4788                                         " is not supported");
4789         }
4790         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4791             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4792                 if (!attr->transfer && !attr->group)
4793                         return rte_flow_error_set(error, ENOTSUP,
4794                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4795                                         "modify field action is not"
4796                                         " supported for group 0");
4797                 if ((action_modify_field->src.offset +
4798                      action_modify_field->width > src_width) ||
4799                     (action_modify_field->src.offset % 32))
4800                         return rte_flow_error_set(error, EINVAL,
4801                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4802                                         "source offset is too big"
4803                                         " or not aligned to 4 bytes");
4804                 if (action_modify_field->src.level &&
4805                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4806                         return rte_flow_error_set(error, ENOTSUP,
4807                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4808                                         "inner header fields modification"
4809                                         " is not supported");
4810         }
4811         if ((action_modify_field->dst.field ==
4812              action_modify_field->src.field) &&
4813             (action_modify_field->dst.level ==
4814              action_modify_field->src.level))
4815                 return rte_flow_error_set(error, EINVAL,
4816                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4817                                 "source and destination fields"
4818                                 " cannot be the same");
4819         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4820             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4821                 return rte_flow_error_set(error, EINVAL,
4822                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4823                                 "immediate value or a pointer to it"
4824                                 " cannot be used as a destination");
4825         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4826             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4827                 return rte_flow_error_set(error, ENOTSUP,
4828                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4829                                 "modifications of an arbitrary"
4830                                 " place in a packet is not supported");
4831         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4832             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4833                 return rte_flow_error_set(error, ENOTSUP,
4834                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4835                                 "modifications of the 802.1Q Tag"
4836                                 " Identifier is not supported");
4837         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4838             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4839                 return rte_flow_error_set(error, ENOTSUP,
4840                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4841                                 "modifications of the VXLAN Network"
4842                                 " Identifier is not supported");
4843         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4844             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4845                 return rte_flow_error_set(error, ENOTSUP,
4846                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4847                                 "modifications of the GENEVE Network"
4848                                 " Identifier is not supported");
4849         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4850             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4851             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4852             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4853                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4854                     !mlx5_flow_ext_mreg_supported(dev))
4855                         return rte_flow_error_set(error, ENOTSUP,
4856                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4857                                         "cannot modify mark or metadata without"
4858                                         " extended metadata register support");
4859         }
4860         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4861                 return rte_flow_error_set(error, ENOTSUP,
4862                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4863                                 "only the SET operation"
4864                                 " is supported");
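             /*
              * The return value is the number of 32-bit words needed to
              * cover the field width, e.g. width = 48 bits gives
              * 48 / 32 + !!(48 % 32) = 2 words.
              */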
4865         return (action_modify_field->width / 32) +
4866                !!(action_modify_field->width % 32);
4867 }
4868
4869 /**
4870  * Validate jump action.
4871  *
      * @param[in] dev
      *   Pointer to rte_eth_dev structure.
      * @param[in] tunnel
      *   Pointer to the tunnel structure, used for group translation.
4872  * @param[in] action
4873  *   Pointer to the jump action.
4874  * @param[in] action_flags
4875  *   Holds the actions detected until now.
4876  * @param[in] attributes
4877  *   Pointer to flow attributes.
4878  * @param[in] external
4879  *   True if the action belongs to a flow rule created by a request
      *   external to the PMD.
4880  * @param[out] error
4881  *   Pointer to error structure.
4882  *
4883  * @return
4884  *   0 on success, a negative errno value otherwise and rte_errno is set.
4885  */
4886 static int
4887 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4888                              const struct mlx5_flow_tunnel *tunnel,
4889                              const struct rte_flow_action *action,
4890                              uint64_t action_flags,
4891                              const struct rte_flow_attr *attributes,
4892                              bool external, struct rte_flow_error *error)
4893 {
4894         uint32_t target_group, table;
4895         int ret = 0;
4896         struct flow_grp_info grp_info = {
4897                 .external = !!external,
4898                 .transfer = !!attributes->transfer,
4899                 .fdb_def_rule = 1,
4900                 .std_tbl_fix = 0
4901         };
4902         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4903                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4904                 return rte_flow_error_set(error, EINVAL,
4905                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4906                                           "can't have 2 fate actions in"
4907                                           " the same flow");
4908         if (!action->conf)
4909                 return rte_flow_error_set(error, EINVAL,
4910                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4911                                           NULL, "action configuration not set");
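             /*
              * Translate the logical jump group to the hardware table id;
              * the mapping depends on the tunnel offload and E-Switch
              * settings carried in grp_info.
              */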
4912         target_group =
4913                 ((const struct rte_flow_action_jump *)action->conf)->group;
4914         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4915                                        &grp_info, error);
4916         if (ret)
4917                 return ret;
4918         if (attributes->group == target_group &&
4919             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4920                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4921                 return rte_flow_error_set(error, EINVAL,
4922                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4923                                           "target group must be other than"
4924                                           " the current flow group");
4925         return 0;
4926 }
4927
4928 /**
4929  * Validate the port_id action.
4930  *
4931  * @param[in] dev
4932  *   Pointer to rte_eth_dev structure.
4933  * @param[in] action_flags
4934  *   Bit-fields that hold the actions detected until now.
4935  * @param[in] action
4936  *   Port_id RTE action structure.
4937  * @param[in] attr
4938  *   Attributes of flow that includes this action.
4939  * @param[out] error
4940  *   Pointer to error structure.
4941  *
4942  * @return
4943  *   0 on success, a negative errno value otherwise and rte_errno is set.
4944  */
4945 static int
4946 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4947                                 uint64_t action_flags,
4948                                 const struct rte_flow_action *action,
4949                                 const struct rte_flow_attr *attr,
4950                                 struct rte_flow_error *error)
4951 {
4952         const struct rte_flow_action_port_id *port_id;
4953         struct mlx5_priv *act_priv;
4954         struct mlx5_priv *dev_priv;
4955         uint16_t port;
4956
4957         if (!attr->transfer)
4958                 return rte_flow_error_set(error, ENOTSUP,
4959                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4960                                           NULL,
4961                                           "port id action is valid in transfer"
4962                                           " mode only");
4963         if (!action || !action->conf)
4964                 return rte_flow_error_set(error, ENOTSUP,
4965                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4966                                           NULL,
4967                                           "port id action parameters must be"
4968                                           " specified");
4969         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4970                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4971                 return rte_flow_error_set(error, EINVAL,
4972                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4973                                           "can have only one fate action"
4974                                           " in a flow");
4975         dev_priv = mlx5_dev_to_eswitch_info(dev);
4976         if (!dev_priv)
4977                 return rte_flow_error_set(error, rte_errno,
4978                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4979                                           NULL,
4980                                           "failed to obtain E-Switch info");
4981         port_id = action->conf;
4982         port = port_id->original ? dev->data->port_id : port_id->id;
4983         act_priv = mlx5_port_to_eswitch_info(port, false);
4984         if (!act_priv)
4985                 return rte_flow_error_set
4986                                 (error, rte_errno,
4987                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4988                                  "failed to obtain E-Switch port id for port");
4989         if (act_priv->domain_id != dev_priv->domain_id)
4990                 return rte_flow_error_set
4991                                 (error, EINVAL,
4992                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4993                                  "port does not belong to"
4994                                  " E-Switch being configured");
4995         return 0;
4996 }
4997
4998 /**
4999  * Get the maximum number of modify header actions.
5000  *
5001  * @param dev
5002  *   Pointer to rte_eth_dev structure.
5003  * @param flags
5004  *   Flags bits to check if root level.
5005  *
5006  * @return
5007  *   Max number of modify header actions device can support.
5008  */
5009 static inline unsigned int
5010 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5011                               uint64_t flags)
5012 {
5013         /*
5014          * There is no way to directly query the max capacity from FW.
5015          * The root table is assumed to support its defined maximum.
5016          */
5017         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
5018                 return MLX5_MAX_MODIFY_NUM;
5019         else
5020                 return MLX5_ROOT_TBL_MODIFY_NUM;
5021 }
5022
5023 /**
5024  * Validate the meter action.
5025  *
5026  * @param[in] dev
5027  *   Pointer to rte_eth_dev structure.
5028  * @param[in] action_flags
5029  *   Bit-fields that hold the actions detected until now.
5030  * @param[in] action
5031  *   Pointer to the meter action.
5032  * @param[in] attr
5033  *   Attributes of flow that includes this action.
5034  * @param[in] port_id_item
5035  *   Pointer to item indicating port id.
      * @param[out] def_policy
      *   Set to true when the meter uses the default policy.
5036  * @param[out] error
5037  *   Pointer to error structure.
5038  *
5039  * @return
5040  *   0 on success, a negative errno value otherwise and rte_errno is set.
5041  */
5042 static int
5043 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5044                                 uint64_t action_flags,
5045                                 const struct rte_flow_action *action,
5046                                 const struct rte_flow_attr *attr,
5047                                 const struct rte_flow_item *port_id_item,
5048                                 bool *def_policy,
5049                                 struct rte_flow_error *error)
5050 {
5051         struct mlx5_priv *priv = dev->data->dev_private;
5052         const struct rte_flow_action_meter *am = action->conf;
5053         struct mlx5_flow_meter_info *fm;
5054         struct mlx5_flow_meter_policy *mtr_policy;
5055         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5056
5057         if (!am)
5058                 return rte_flow_error_set(error, EINVAL,
5059                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5060                                           "meter action conf is NULL");
5061
5062         if (action_flags & MLX5_FLOW_ACTION_METER)
5063                 return rte_flow_error_set(error, ENOTSUP,
5064                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5065                                           "meter chaining not supported");
5066         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5067                 return rte_flow_error_set(error, ENOTSUP,
5068                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5069                                           "meter with jump not supported");
5070         if (!priv->mtr_en)
5071                 return rte_flow_error_set(error, ENOTSUP,
5072                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5073                                           NULL,
5074                                           "meter action not supported");
5075         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5076         if (!fm)
5077                 return rte_flow_error_set(error, EINVAL,
5078                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5079                                           "Meter not found");
5080         /* ASO meter can always be shared by different domains. */
5081         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5082             !(fm->transfer == attr->transfer ||
5083               (!fm->ingress && !attr->ingress && attr->egress) ||
5084               (!fm->egress && !attr->egress && attr->ingress)))
5085                 return rte_flow_error_set(error, EINVAL,
5086                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5087                         "Flow attributes domain is either invalid "
5088                         "or conflicts with the current "
5089                         "meter attributes");
5090         if (fm->def_policy) {
5091                 if (!((attr->transfer &&
5092                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5093                         (attr->egress &&
5094                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5095                         (attr->ingress &&
5096                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5097                         return rte_flow_error_set(error, EINVAL,
5098                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5099                                           "Flow attributes domain "
5100                                           "conflicts with the current "
5101                                           "meter domain attributes");
5102                 *def_policy = true;
5103         } else {
5104                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5105                                                 fm->policy_id, NULL);
5106                 if (!mtr_policy)
5107                         return rte_flow_error_set(error, EINVAL,
5108                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5109                                           "Invalid policy id for meter");
5110                 if (!((attr->transfer && mtr_policy->transfer) ||
5111                         (attr->egress && mtr_policy->egress) ||
5112                         (attr->ingress && mtr_policy->ingress)))
5113                         return rte_flow_error_set(error, EINVAL,
5114                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5115                                           "Flow attributes domain "
5116                                           "conflicts with the current "
5117                                           "meter domain attributes");
5118                 if (attr->transfer && mtr_policy->dev) {
5119                         /*
5120                          * When the policy has a port_id fate action, the
5121                          * flow should have the same src port as the policy.
5122                          */
5123                         struct mlx5_priv *policy_port_priv =
5124                                         mtr_policy->dev->data->dev_private;
5125                         int32_t flow_src_port = priv->representor_id;
5126
5127                         if (port_id_item) {
5128                                 const struct rte_flow_item_port_id *spec =
5129                                                         port_id_item->spec;
5130                                 struct mlx5_priv *port_priv =
5131                                         mlx5_port_to_eswitch_info(spec->id,
5132                                                                   false);
5133                                 if (!port_priv)
5134                                         return rte_flow_error_set(error,
5135                                                 rte_errno,
5136                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5137                                                 spec,
5138                                                 "Failed to get port info.");
5139                                 flow_src_port = port_priv->representor_id;
5140                         }
5141                         if (flow_src_port != policy_port_priv->representor_id)
5142                                 return rte_flow_error_set(error,
5143                                                 rte_errno,
5144                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5145                                                 NULL,
5146                                                 "Flow and meter policy "
5147                                                 "have different src ports.");
5148                 }
5149                 *def_policy = false;
5150         }
5151         return 0;
5152 }
5153
5154 /**
5155  * Validate the age action.
5156  *
5157  * @param[in] action_flags
5158  *   Holds the actions detected until now.
5159  * @param[in] action
5160  *   Pointer to the age action.
5161  * @param[in] dev
5162  *   Pointer to the Ethernet device structure.
5163  * @param[out] error
5164  *   Pointer to error structure.
5165  *
5166  * @return
5167  *   0 on success, a negative errno value otherwise and rte_errno is set.
5168  */
5169 static int
5170 flow_dv_validate_action_age(uint64_t action_flags,
5171                             const struct rte_flow_action *action,
5172                             struct rte_eth_dev *dev,
5173                             struct rte_flow_error *error)
5174 {
5175         struct mlx5_priv *priv = dev->data->dev_private;
5176         const struct rte_flow_action_age *age = action->conf;
5177
5178         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5179             !priv->sh->aso_age_mng))
5180                 return rte_flow_error_set(error, ENOTSUP,
5181                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5182                                           NULL,
5183                                           "age action not supported");
5184         if (!(action->conf))
5185                 return rte_flow_error_set(error, EINVAL,
5186                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5187                                           "configuration cannot be null");
5188         if (!(age->timeout))
5189                 return rte_flow_error_set(error, EINVAL,
5190                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5191                                           "invalid timeout value 0");
5192         if (action_flags & MLX5_FLOW_ACTION_AGE)
5193                 return rte_flow_error_set(error, EINVAL,
5194                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5195                                           "duplicate age actions set");
5196         return 0;
5197 }
5198
5199 /**
5200  * Validate the modify-header IPv4 DSCP actions.
5201  *
5202  * @param[in] action_flags
5203  *   Holds the actions detected until now.
5204  * @param[in] action
5205  *   Pointer to the modify action.
5206  * @param[in] item_flags
5207  *   Holds the items detected.
5208  * @param[out] error
5209  *   Pointer to error structure.
5210  *
5211  * @return
5212  *   0 on success, a negative errno value otherwise and rte_errno is set.
5213  */
5214 static int
5215 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5216                                          const struct rte_flow_action *action,
5217                                          const uint64_t item_flags,
5218                                          struct rte_flow_error *error)
5219 {
5220         int ret = 0;
5221
5222         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5223         if (!ret) {
5224                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5225                         return rte_flow_error_set(error, EINVAL,
5226                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5227                                                   NULL,
5228                                                   "no ipv4 item in pattern");
5229         }
5230         return ret;
5231 }
5232
5233 /**
5234  * Validate the modify-header IPv6 DSCP actions.
5235  *
5236  * @param[in] action_flags
5237  *   Holds the actions detected until now.
5238  * @param[in] action
5239  *   Pointer to the modify action.
5240  * @param[in] item_flags
5241  *   Holds the items detected.
5242  * @param[out] error
5243  *   Pointer to error structure.
5244  *
5245  * @return
5246  *   0 on success, a negative errno value otherwise and rte_errno is set.
5247  */
5248 static int
5249 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5250                                          const struct rte_flow_action *action,
5251                                          const uint64_t item_flags,
5252                                          struct rte_flow_error *error)
5253 {
5254         int ret = 0;
5255
5256         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5257         if (!ret) {
5258                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5259                         return rte_flow_error_set(error, EINVAL,
5260                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5261                                                   NULL,
5262                                                   "no ipv6 item in pattern");
5263         }
5264         return ret;
5265 }
5266
5267 /**
5268  * Match modify-header resource.
5269  *
5270  * @param list
5271  *   Pointer to the hash list.
5272  * @param entry
5273  *   Pointer to the existing resource entry object.
5274  * @param key
5275  *   Key of the new entry.
5276  * @param cb_ctx
5277  *   Pointer to the context holding the new modify-header resource.
5278  *
5279  * @return
5280  *   0 on matching, non-zero otherwise.
5281  */
5282 int
5283 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5284                         struct mlx5_hlist_entry *entry,
5285                         uint64_t key __rte_unused, void *cb_ctx)
5286 {
5287         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5288         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5289         struct mlx5_flow_dv_modify_hdr_resource *resource =
5290                         container_of(entry, typeof(*resource), entry);
5291         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5292
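             /*
              * The comparison key covers the struct tail starting at ft_type
              * plus the variable-length modify action array.
              */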
5293         key_len += ref->actions_num * sizeof(ref->actions[0]);
5294         return ref->actions_num != resource->actions_num ||
5295                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5296 }
5297
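     /**
      * Allocate and register a new modify-header resource (creation callback).
      *
      * A sketch of the callback contract, inferred from the mlx5_hlist usage
      * in this file: invoked by mlx5_hlist_register() when no matching entry
      * is found.
      *
      * @param list
      *   Pointer to the hash list.
      * @param key
      *   Key of the new entry, unused since the full key is taken from cb_ctx.
      * @param cb_ctx
      *   Pointer to the context holding the reference modify-header resource.
      *
      * @return
      *   Pointer to the new hash list entry on success, NULL otherwise and
      *   the context error is set.
      */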
5298 struct mlx5_hlist_entry *
5299 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5300                          void *cb_ctx)
5301 {
5302         struct mlx5_dev_ctx_shared *sh = list->ctx;
5303         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5304         struct mlx5dv_dr_domain *ns;
5305         struct mlx5_flow_dv_modify_hdr_resource *entry;
5306         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5307         int ret;
5308         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5309         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5310
5311         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5312                             SOCKET_ID_ANY);
5313         if (!entry) {
5314                 rte_flow_error_set(ctx->error, ENOMEM,
5315                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5316                                    "cannot allocate resource memory");
5317                 return NULL;
5318         }
5319         rte_memcpy(&entry->ft_type,
5320                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5321                    key_len + data_len);
5322         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5323                 ns = sh->fdb_domain;
5324         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5325                 ns = sh->tx_domain;
5326         else
5327                 ns = sh->rx_domain;
5328         ret = mlx5_flow_os_create_flow_action_modify_header
5329                                         (sh->ctx, ns, entry,
5330                                          data_len, &entry->action);
5331         if (ret) {
5332                 mlx5_free(entry);
5333                 rte_flow_error_set(ctx->error, ENOMEM,
5334                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5335                                    NULL, "cannot create modification action");
5336                 return NULL;
5337         }
5338         return &entry->entry;
5339 }
5340
5341 /**
5342  * Validate the sample action.
5343  *
5344  * @param[in, out] action_flags
5345  *   Holds the actions detected until now.
5346  * @param[in] action
5347  *   Pointer to the sample action.
5348  * @param[in] dev
5349  *   Pointer to the Ethernet device structure.
5350  * @param[in] attr
5351  *   Attributes of flow that includes this action.
5352  * @param[in] item_flags
5353  *   Holds the items detected.
5354  * @param[in] rss
5355  *   Pointer to the RSS action.
5356  * @param[out] sample_rss
5357  *   Pointer to the RSS action in sample action list.
5358  * @param[out] count
5359  *   Pointer to the COUNT action in sample action list.
5360  * @param[out] fdb_mirror_limit
5361  *   Pointer to the FDB mirror limitation flag.
5362  * @param[out] error
5363  *   Pointer to error structure.
5364  *
5365  * @return
5366  *   0 on success, a negative errno value otherwise and rte_errno is set.
5367  */
5368 static int
5369 flow_dv_validate_action_sample(uint64_t *action_flags,
5370                                const struct rte_flow_action *action,
5371                                struct rte_eth_dev *dev,
5372                                const struct rte_flow_attr *attr,
5373                                uint64_t item_flags,
5374                                const struct rte_flow_action_rss *rss,
5375                                const struct rte_flow_action_rss **sample_rss,
5376                                const struct rte_flow_action_count **count,
5377                                int *fdb_mirror_limit,
5378                                struct rte_flow_error *error)
5379 {
5380         struct mlx5_priv *priv = dev->data->dev_private;
5381         struct mlx5_dev_config *dev_conf = &priv->config;
5382         const struct rte_flow_action_sample *sample = action->conf;
5383         const struct rte_flow_action *act;
5384         uint64_t sub_action_flags = 0;
5385         uint16_t queue_index = 0xFFFF;
5386         int actions_n = 0;
5387         int ret;
5388
5389         if (!sample)
5390                 return rte_flow_error_set(error, EINVAL,
5391                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5392                                           "configuration cannot be NULL");
5393         if (sample->ratio == 0)
5394                 return rte_flow_error_set(error, EINVAL,
5395                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5396                                           "ratio value starts from 1");
5397         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5398                 return rte_flow_error_set(error, ENOTSUP,
5399                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5400                                           NULL,
5401                                           "sample action not supported");
5402         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5403                 return rte_flow_error_set(error, EINVAL,
5404                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5405                                           "Multiple sample actions not "
5406                                           "supported");
5407         if (*action_flags & MLX5_FLOW_ACTION_METER)
5408                 return rte_flow_error_set(error, EINVAL,
5409                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5410                                           "wrong action order, meter should "
5411                                           "be after sample action");
5412         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5413                 return rte_flow_error_set(error, EINVAL,
5414                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5415                                           "wrong action order, jump should "
5416                                           "be after sample action");
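             /* Walk the sample's sub-action list and validate each entry. */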
5417         act = sample->actions;
5418         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5419                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5420                         return rte_flow_error_set(error, ENOTSUP,
5421                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5422                                                   act, "too many actions");
5423                 switch (act->type) {
5424                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5425                         ret = mlx5_flow_validate_action_queue(act,
5426                                                               sub_action_flags,
5427                                                               dev,
5428                                                               attr, error);
5429                         if (ret < 0)
5430                                 return ret;
5431                         queue_index = ((const struct rte_flow_action_queue *)
5432                                                         (act->conf))->index;
5433                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5434                         ++actions_n;
5435                         break;
5436                 case RTE_FLOW_ACTION_TYPE_RSS:
5437                         *sample_rss = act->conf;
5438                         ret = mlx5_flow_validate_action_rss(act,
5439                                                             sub_action_flags,
5440                                                             dev, attr,
5441                                                             item_flags,
5442                                                             error);
5443                         if (ret < 0)
5444                                 return ret;
5445                         if (rss && *sample_rss &&
5446                             ((*sample_rss)->level != rss->level ||
5447                             (*sample_rss)->types != rss->types))
5448                                 return rte_flow_error_set(error, ENOTSUP,
5449                                         RTE_FLOW_ERROR_TYPE_ACTION,
5450                                         NULL,
5451                                         "Can't use different RSS types "
5452                                         "or levels in the same flow");
5453                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5454                                 queue_index = (*sample_rss)->queue[0];
5455                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5456                         ++actions_n;
5457                         break;
5458                 case RTE_FLOW_ACTION_TYPE_MARK:
5459                         ret = flow_dv_validate_action_mark(dev, act,
5460                                                            sub_action_flags,
5461                                                            attr, error);
5462                         if (ret < 0)
5463                                 return ret;
5464                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5465                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5466                                                 MLX5_FLOW_ACTION_MARK_EXT;
5467                         else
5468                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5469                         ++actions_n;
5470                         break;
5471                 case RTE_FLOW_ACTION_TYPE_COUNT:
5472                         ret = flow_dv_validate_action_count
5473                                 (dev, is_shared_action_count(act),
5474                                  *action_flags | sub_action_flags,
5475                                  error);
5476                         if (ret < 0)
5477                                 return ret;
5478                         *count = act->conf;
5479                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5480                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5481                         ++actions_n;
5482                         break;
5483                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5484                         ret = flow_dv_validate_action_port_id(dev,
5485                                                               sub_action_flags,
5486                                                               act,
5487                                                               attr,
5488                                                               error);
5489                         if (ret)
5490                                 return ret;
5491                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5492                         ++actions_n;
5493                         break;
5494                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5495                         ret = flow_dv_validate_action_raw_encap_decap
5496                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5497                                  &actions_n, action, item_flags, error);
5498                         if (ret < 0)
5499                                 return ret;
5500                         ++actions_n;
5501                         break;
5502                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5503                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5504                         ret = flow_dv_validate_action_l2_encap(dev,
5505                                                                sub_action_flags,
5506                                                                act, attr,
5507                                                                error);
5508                         if (ret < 0)
5509                                 return ret;
5510                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5511                         ++actions_n;
5512                         break;
5513                 default:
5514                         return rte_flow_error_set(error, ENOTSUP,
5515                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5516                                                   NULL,
5517                                                   "Unsupported action in "
5518                                                   "sample actions list");
5519                 }
5520         }
5521         if (attr->ingress && !attr->transfer) {
5522                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5523                                           MLX5_FLOW_ACTION_RSS)))
5524                         return rte_flow_error_set(error, EINVAL,
5525                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5526                                                   NULL,
5527                                                   "Ingress must have a dest "
5528                                                   "QUEUE for Sample");
5529         } else if (attr->egress && !attr->transfer) {
5530                 return rte_flow_error_set(error, ENOTSUP,
5531                                           RTE_FLOW_ERROR_TYPE_ACTION,
5532                                           NULL,
5533                                           "Sample only supports Ingress "
5534                                           "or E-Switch");
5535         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5536                 MLX5_ASSERT(attr->transfer);
5537                 if (sample->ratio > 1)
5538                         return rte_flow_error_set(error, ENOTSUP,
5539                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5540                                                   NULL,
5541                                                   "E-Switch doesn't support "
5542                                                   "any optional action "
5543                                                   "for sampling");
5544                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5545                         return rte_flow_error_set(error, ENOTSUP,
5546                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5547                                                   NULL,
5548                                                   "unsupported action QUEUE");
5549                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5550                         return rte_flow_error_set(error, ENOTSUP,
5551                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5552                                                   NULL,
5553                                                   "unsupported action RSS");
5554                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5555                         return rte_flow_error_set(error, EINVAL,
5556                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5557                                                   NULL,
5558                                                   "E-Switch must have a dest "
5559                                                   "port for mirroring");
5560                 if (!priv->config.hca_attr.reg_c_preserve &&
5561                      priv->representor_id != UINT16_MAX)
5562                         *fdb_mirror_limit = 1;
5563         }
5564         /* Continue validation for Xcap actions. */
5565         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5566             (queue_index == 0xFFFF ||
5567              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5568                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5569                      MLX5_FLOW_XCAP_ACTIONS)
5570                         return rte_flow_error_set(error, ENOTSUP,
5571                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5572                                                   NULL, "encap and decap "
5573                                                   "combination isn't "
5574                                                   "supported");
5575                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5576                                                         MLX5_FLOW_ACTION_ENCAP))
5577                         return rte_flow_error_set(error, ENOTSUP,
5578                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5579                                                   NULL, "encap is not supported"
5580                                                   " for ingress traffic");
5581         }
5582         return 0;
5583 }
5584
5585 /**
5586  * Find existing modify-header resource or create and register a new one.
5587  *
5588  * @param[in, out] dev
5589  *   Pointer to rte_eth_dev structure.
5590  * @param[in, out] resource
5591  *   Pointer to modify-header resource.
5592  * @param[in, out] dev_flow
5593  *   Pointer to the dev_flow.
5594  * @param[out] error
5595  *   Pointer to error structure.
5596  *
5597  * @return
5598  *   0 on success, otherwise a negative errno value and rte_errno is set.
5599  */
5600 static int
5601 flow_dv_modify_hdr_resource_register
5602                         (struct rte_eth_dev *dev,
5603                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5604                          struct mlx5_flow *dev_flow,
5605                          struct rte_flow_error *error)
5606 {
5607         struct mlx5_priv *priv = dev->data->dev_private;
5608         struct mlx5_dev_ctx_shared *sh = priv->sh;
5609         uint32_t key_len = sizeof(*resource) -
5610                            offsetof(typeof(*resource), ft_type) +
5611                            resource->actions_num * sizeof(resource->actions[0]);
5612         struct mlx5_hlist_entry *entry;
5613         struct mlx5_flow_cb_ctx ctx = {
5614                 .error = error,
5615                 .data = resource,
5616         };
5617         uint64_t key64;
5618
5619         resource->flags = dev_flow->dv.group ? 0 :
5620                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5621         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5622                                     resource->flags))
5623                 return rte_flow_error_set(error, EOVERFLOW,
5624                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5625                                           "too many modify header items");
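             /*
              * A raw checksum over the key region, ft_type through the action
              * array, serves as the hash list key.
              */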
5626         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5627         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5628         if (!entry)
5629                 return -rte_errno;
5630         resource = container_of(entry, typeof(*resource), entry);
5631         dev_flow->handle->dvh.modify_hdr = resource;
5632         return 0;
5633 }
5634
5635 /**
5636  * Get DV flow counter by index.
5637  *
5638  * @param[in] dev
5639  *   Pointer to the Ethernet device structure.
5640  * @param[in] idx
5641  *   mlx5 flow counter index in the container.
5642  * @param[out] ppool
5643  *   mlx5 flow counter pool in the container.
5644  *
5645  * @return
5646  *   Pointer to the counter, NULL otherwise.
5647  */
5648 static struct mlx5_flow_counter *
5649 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5650                            uint32_t idx,
5651                            struct mlx5_flow_counter_pool **ppool)
5652 {
5653         struct mlx5_priv *priv = dev->data->dev_private;
5654         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5655         struct mlx5_flow_counter_pool *pool;
5656
5657         /* Decrease to original index and clear shared bit. */
5658         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5659         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5660         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5661         MLX5_ASSERT(pool);
5662         if (ppool)
5663                 *ppool = pool;
5664         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5665 }
5666
5667 /**
5668  * Check the devx counter belongs to the pool.
5669  *
5670  * @param[in] pool
5671  *   Pointer to the counter pool.
5672  * @param[in] id
5673  *   The counter devx ID.
5674  *
5675  * @return
5676  *   True if counter belongs to the pool, false otherwise.
5677  */
5678 static bool
5679 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5680 {
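             /* Align the pool's first devx counter ID down to the pool size. */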
5681         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5682                    MLX5_COUNTERS_PER_POOL;
5683
5684         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5685                 return true;
5686         return false;
5687 }
5688
5689 /**
5690  * Get a pool by devx counter ID.
5691  *
5692  * @param[in] cmng
5693  *   Pointer to the counter management.
5694  * @param[in] id
5695  *   The counter devx ID.
5696  *
5697  * @return
5698  *   The counter pool pointer if it exists, NULL otherwise.
5699  */
5700 static struct mlx5_flow_counter_pool *
5701 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5702 {
5703         uint32_t i;
5704         struct mlx5_flow_counter_pool *pool = NULL;
5705
5706         rte_spinlock_lock(&cmng->pool_update_sl);
5707         /* Check last used pool. */
5708         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5709             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5710                 pool = cmng->pools[cmng->last_pool_idx];
5711                 goto out;
5712         }
5713         /* ID out of range means no suitable pool in the container. */
5714         if (id > cmng->max_id || id < cmng->min_id)
5715                 goto out;
5716         /*
5717          * Search the container from the end: counter IDs are mostly
5718          * allocated in increasing sequence, so the last pool is the
5719          * likeliest match.
5720          */
5721         i = cmng->n_valid;
5722         while (i--) {
5723                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5724
5725                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5726                         pool = pool_tmp;
5727                         break;
5728                 }
5729         }
5730 out:
5731         rte_spinlock_unlock(&cmng->pool_update_sl);
5732         return pool;
5733 }
5734
5735 /**
5736  * Resize a counter container.
5737  *
5738  * @param[in] dev
5739  *   Pointer to the Ethernet device structure.
5740  *
5741  * @return
5742  *   0 on success, otherwise negative errno value and rte_errno is set.
5743  */
5744 static int
5745 flow_dv_container_resize(struct rte_eth_dev *dev)
5746 {
5747         struct mlx5_priv *priv = dev->data->dev_private;
5748         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5749         void *old_pools = cmng->pools;
5750         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5751         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5752         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5753
5754         if (!pools) {
5755                 rte_errno = ENOMEM;
5756                 return -ENOMEM;
5757         }
5758         if (old_pools)
5759                 memcpy(pools, old_pools, cmng->n *
5760                                        sizeof(struct mlx5_flow_counter_pool *));
5761         cmng->n = resize;
5762         cmng->pools = pools;
5763         if (old_pools)
5764                 mlx5_free(old_pools);
5765         return 0;
5766 }
5767
5768 /**
5769  * Query a devx flow counter.
5770  *
5771  * @param[in] dev
5772  *   Pointer to the Ethernet device structure.
5773  * @param[in] counter
5774  *   Index to the flow counter.
5775  * @param[out] pkts
5776  *   The statistics value of packets.
5777  * @param[out] bytes
5778  *   The statistics value of bytes.
5779  *
5780  * @return
5781  *   0 on success, otherwise a negative errno value and rte_errno is set.
5782  */
5783 static inline int
5784 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5785                      uint64_t *bytes)
5786 {
5787         struct mlx5_priv *priv = dev->data->dev_private;
5788         struct mlx5_flow_counter_pool *pool = NULL;
5789         struct mlx5_flow_counter *cnt;
5790         int offset;
5791
5792         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5793         MLX5_ASSERT(pool);
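             /*
              * In fallback mode every counter owns its own devx object, so it
              * is queried synchronously instead of reading the batch raw data.
              */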
5794         if (priv->sh->cmng.counter_fallback)
5795                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5796                                         0, pkts, bytes, 0, NULL, NULL, 0);
5797         rte_spinlock_lock(&pool->sl);
5798         if (!pool->raw) {
5799                 *pkts = 0;
5800                 *bytes = 0;
5801         } else {
5802                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5803                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5804                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5805         }
5806         rte_spinlock_unlock(&pool->sl);
5807         return 0;
5808 }
5809
5810 /**
5811  * Create and initialize a new counter pool.
5812  *
5813  * @param[in] dev
5814  *   Pointer to the Ethernet device structure.
5815  * @param[out] dcs
5816  *   The devX counter handle.
5817  * @param[in] age
5818  *   Whether the pool is for counters that were allocated for aging.
5821  *
5822  * @return
5823  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5824  */
5825 static struct mlx5_flow_counter_pool *
5826 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5827                     uint32_t age)
5828 {
5829         struct mlx5_priv *priv = dev->data->dev_private;
5830         struct mlx5_flow_counter_pool *pool;
5831         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5832         bool fallback = priv->sh->cmng.counter_fallback;
5833         uint32_t size = sizeof(*pool);
5834
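             /*
              * The counter array, and the age parameter array when aging is
              * requested, are laid out right after the pool header.
              */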
5835         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5836         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5837         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5838         if (!pool) {
5839                 rte_errno = ENOMEM;
5840                 return NULL;
5841         }
5842         pool->raw = NULL;
5843         pool->is_aged = !!age;
5844         pool->query_gen = 0;
5845         pool->min_dcs = dcs;
5846         rte_spinlock_init(&pool->sl);
5847         rte_spinlock_init(&pool->csl);
5848         TAILQ_INIT(&pool->counters[0]);
5849         TAILQ_INIT(&pool->counters[1]);
5850         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5851         rte_spinlock_lock(&cmng->pool_update_sl);
5852         pool->index = cmng->n_valid;
5853         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5854                 mlx5_free(pool);
5855                 rte_spinlock_unlock(&cmng->pool_update_sl);
5856                 return NULL;
5857         }
5858         cmng->pools[pool->index] = pool;
5859         cmng->n_valid++;
5860         if (unlikely(fallback)) {
5861                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5862
5863                 if (base < cmng->min_id)
5864                         cmng->min_id = base;
5865                 if (base > cmng->max_id)
5866                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5867                 cmng->last_pool_idx = pool->index;
5868         }
5869         rte_spinlock_unlock(&cmng->pool_update_sl);
5870         return pool;
5871 }
5872
5873 /**
5874  * Prepare a new counter and/or a new counter pool.
5875  *
5876  * @param[in] dev
5877  *   Pointer to the Ethernet device structure.
5878  * @param[out] cnt_free
5879  *   Where to put the pointer of a new counter.
5880  * @param[in] age
5881  *   Whether the pool is for counter that was allocated for aging.
5882  *
5883  * @return
5884  *   The counter pool pointer and @p cnt_free is set on success,
5885  *   NULL otherwise and rte_errno is set.
5886  */
5887 static struct mlx5_flow_counter_pool *
5888 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5889                              struct mlx5_flow_counter **cnt_free,
5890                              uint32_t age)
5891 {
5892         struct mlx5_priv *priv = dev->data->dev_private;
5893         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5894         struct mlx5_flow_counter_pool *pool;
5895         struct mlx5_counters tmp_tq;
5896         struct mlx5_devx_obj *dcs = NULL;
5897         struct mlx5_flow_counter *cnt;
5898         enum mlx5_counter_type cnt_type =
5899                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5900         bool fallback = priv->sh->cmng.counter_fallback;
5901         uint32_t i;
5902
5903         if (fallback) {
5904                 /* bulk_bitmap must be 0 for single counter allocation. */
5905                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5906                 if (!dcs)
5907                         return NULL;
5908                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5909                 if (!pool) {
5910                         pool = flow_dv_pool_create(dev, dcs, age);
5911                         if (!pool) {
5912                                 mlx5_devx_cmd_destroy(dcs);
5913                                 return NULL;
5914                         }
5915                 }
5916                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5917                 cnt = MLX5_POOL_GET_CNT(pool, i);
5918                 cnt->pool = pool;
5919                 cnt->dcs_when_free = dcs;
5920                 *cnt_free = cnt;
5921                 return pool;
5922         }
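             /* Allocate a bulk of counters large enough to populate a pool. */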
5923         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5924         if (!dcs) {
5925                 rte_errno = ENODATA;
5926                 return NULL;
5927         }
5928         pool = flow_dv_pool_create(dev, dcs, age);
5929         if (!pool) {
5930                 mlx5_devx_cmd_destroy(dcs);
5931                 return NULL;
5932         }
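             /*
              * Seed the shared free list with counters 1..N-1 of the new bulk
              * and hand counter 0 back to the caller directly.
              */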
5933         TAILQ_INIT(&tmp_tq);
5934         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
5935                 cnt = MLX5_POOL_GET_CNT(pool, i);
5936                 cnt->pool = pool;
5937                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
5938         }
5939         rte_spinlock_lock(&cmng->csl[cnt_type]);
5940         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
5941         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5942         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
5943         (*cnt_free)->pool = pool;
5944         return pool;
5945 }
5946
5947 /**
5948  * Allocate a flow counter.
5949  *
5950  * @param[in] dev
5951  *   Pointer to the Ethernet device structure.
5952  * @param[in] age
5953  *   Whether the counter was allocated for aging.
5954  *
5955  * @return
5956  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5957  */
5958 static uint32_t
5959 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
5960 {
5961         struct mlx5_priv *priv = dev->data->dev_private;
5962         struct mlx5_flow_counter_pool *pool = NULL;
5963         struct mlx5_flow_counter *cnt_free = NULL;
5964         bool fallback = priv->sh->cmng.counter_fallback;
5965         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5966         enum mlx5_counter_type cnt_type =
5967                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5968         uint32_t cnt_idx;
5969
5970         if (!priv->config.devx) {
5971                 rte_errno = ENOTSUP;
5972                 return 0;
5973         }
5974         /* Get free counters from container. */
5975         rte_spinlock_lock(&cmng->csl[cnt_type]);
5976         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5977         if (cnt_free)
5978                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5979         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5980         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5981                 goto err;
5982         pool = cnt_free->pool;
5983         if (fallback)
5984                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5985         /* Create a DV counter action only on first-time usage. */
5986         if (!cnt_free->action) {
5987                 uint16_t offset;
5988                 struct mlx5_devx_obj *dcs;
5989                 int ret;
5990
5991                 if (!fallback) {
5992                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5993                         dcs = pool->min_dcs;
5994                 } else {
5995                         offset = 0;
5996                         dcs = cnt_free->dcs_when_free;
5997                 }
5998                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5999                                                             &cnt_free->action);
6000                 if (ret) {
6001                         rte_errno = errno;
6002                         goto err;
6003                 }
6004         }
6005         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6006                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6007         /* Update the counter reset values. */
6008         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6009                                  &cnt_free->bytes))
6010                 goto err;
6011         if (!fallback && !priv->sh->cmng.query_thread_on)
6012                 /* Start the asynchronous batch query by the host thread. */
6013                 mlx5_set_query_alarm(priv->sh);
6014         /*
6015          * When the count action isn't shared by ID, the shared_info field
6016          * holds the indirect action API's refcnt.
6017          * When the counter action is shared neither by ID nor by the
6018          * indirect action API, the refcnt must be 1.
6019          */
6020         cnt_free->shared_info.refcnt = 1;
6021         return cnt_idx;
6022 err:
6023         if (cnt_free) {
6024                 cnt_free->pool = pool;
6025                 if (fallback)
6026                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6027                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6028                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6029                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6030         }
6031         return 0;
6032 }
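     /*
      * A minimal usage sketch for the counter helpers above (illustrative
      * only, not part of the driver):
      *
      *     uint32_t idx = flow_dv_counter_alloc(dev, 0);
      *     uint64_t pkts, bytes;
      *
      *     if (idx && !_flow_dv_query_count(dev, idx, &pkts, &bytes))
      *             flow_dv_counter_free(dev, idx);
      */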
6033
6034 /**
6035  * Allocate a shared flow counter.
6036  *
6037  * @param[in] ctx
6038  *   Pointer to the shared counter configuration.
6039  * @param[in] data
6040  *   Pointer to save the allocated counter index.
6041  *
6042  * @return
6043  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6044  */
6046 static int32_t
6047 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6048 {
6049         struct mlx5_shared_counter_conf *conf = ctx;
6050         struct rte_eth_dev *dev = conf->dev;
6051         struct mlx5_flow_counter *cnt;
6052
6053         data->dword = flow_dv_counter_alloc(dev, 0);
             /* Propagate allocation failure instead of registering index 0. */
             if (!data->dword)
                     return -rte_errno;
6054         data->dword |= MLX5_CNT_SHARED_OFFSET;
6055         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6056         cnt->shared_info.id = conf->id;
6057         return 0;
6058 }
6059
6060 /**
6061  * Get a shared flow counter.
6062  *
6063  * @param[in] dev
6064  *   Pointer to the Ethernet device structure.
6065  * @param[in] id
6066  *   Counter identifier.
6067  *
6068  * @return
6069  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6070  */
6071 static uint32_t
6072 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6073 {
6074         struct mlx5_priv *priv = dev->data->dev_private;
6075         struct mlx5_shared_counter_conf conf = {
6076                 .dev = dev,
6077                 .id = id,
6078         };
6079         union mlx5_l3t_data data = {
6080                 .dword = 0,
6081         };
6082
6083         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6084                                flow_dv_counter_alloc_shared_cb, &conf);
6085         return data.dword;
6086 }
6087
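/*
 * A usage sketch for the lookup-or-create pattern above, assuming the
 * usual semantics of the three-level table (L3T) helpers: on a hit,
 * mlx5_l3t_prepare_entry() returns the stored dword and takes an entry
 * reference; on a miss it invokes flow_dv_counter_alloc_shared_cb() to
 * allocate the counter and stores the result.
 *
 *   uint32_t idx1 = flow_dv_counter_get_shared(dev, 42);
 *   uint32_t idx2 = flow_dv_counter_get_shared(dev, 42);
 *
 * Both calls resolve to the same counter index; the entry refcnt is
 * then 2 and is dropped through mlx5_l3t_clear_entry() in
 * flow_dv_counter_free().
 */
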
6088 /**
6089  * Get age param from counter index.
6090  *
6091  * @param[in] dev
6092  *   Pointer to the Ethernet device structure.
6093  * @param[in] counter
6094  *   Index to the counter handler.
6095  *
6096  * @return
6097  *   The aging parameter specified for the counter index.
6098  */
6099 static struct mlx5_age_param*
6100 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6101                                 uint32_t counter)
6102 {
6103         struct mlx5_flow_counter *cnt;
6104         struct mlx5_flow_counter_pool *pool = NULL;
6105
6106         flow_dv_counter_get_by_idx(dev, counter, &pool);
6107         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6108         cnt = MLX5_POOL_GET_CNT(pool, counter);
6109         return MLX5_CNT_TO_AGE(cnt);
6110 }
6111
6112 /**
6113  * Remove a flow counter from aged counter list.
6114  *
6115  * @param[in] dev
6116  *   Pointer to the Ethernet device structure.
6117  * @param[in] counter
6118  *   Index to the counter handler.
6119  * @param[in] cnt
6120  *   Pointer to the counter handler.
6121  */
6122 static void
6123 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6124                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6125 {
6126         struct mlx5_age_info *age_info;
6127         struct mlx5_age_param *age_param;
6128         struct mlx5_priv *priv = dev->data->dev_private;
6129         uint16_t expected = AGE_CANDIDATE;
6130
6131         age_info = GET_PORT_AGE_INFO(priv);
6132         age_param = flow_dv_counter_idx_get_age(dev, counter);
6133         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6134                                          AGE_FREE, false, __ATOMIC_RELAXED,
6135                                          __ATOMIC_RELAXED)) {
6136                 /*
6137                  * We need the lock even if the age timed out, since the
6138                  * counter may still be in use by the query.
6139                  */
6140                 rte_spinlock_lock(&age_info->aged_sl);
6141                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6142                 rte_spinlock_unlock(&age_info->aged_sl);
6143                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6144         }
6145 }
6146
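/*
 * The compare-exchange above implements a small state machine; a
 * sketch of the two possible paths (AGE_TMOUT as the aged-out state is
 * an assumption based on the handling here):
 *
 *   AGE_CANDIDATE --cmpxchg--> AGE_FREE
 *       The counter never aged out: nothing was queued, done.
 *
 *   AGE_CANDIDATE --timeout--> AGE_TMOUT  (cmpxchg fails)
 *       The counter already sits on aged_counters, so it is unlinked
 *       under aged_sl and only then marked AGE_FREE.
 *
 * The lock is required on the second path because the aging code may
 * still be walking the aged list while the counter is unlinked.
 */
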
6147 /**
6148  * Release a flow counter.
6149  *
6150  * @param[in] dev
6151  *   Pointer to the Ethernet device structure.
6152  * @param[in] counter
6153  *   Index to the counter handler.
6154  */
6155 static void
6156 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6157 {
6158         struct mlx5_priv *priv = dev->data->dev_private;
6159         struct mlx5_flow_counter_pool *pool = NULL;
6160         struct mlx5_flow_counter *cnt;
6161         enum mlx5_counter_type cnt_type;
6162
6163         if (!counter)
6164                 return;
6165         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6166         MLX5_ASSERT(pool);
6167         if (pool->is_aged) {
6168                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6169         } else {
6170                 /*
6171                  * If the counter action is shared by ID, l3t_clear_entry
6172                  * decrements its reference counter. If the action is still
6173                  * referenced after the decrement, the function returns here
6174                  * and does not release it.
6175                  */
6176                 if (IS_LEGACY_SHARED_CNT(counter) &&
6177                     mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
6178                                          cnt->shared_info.id))
6179                         return;
6180                 /*
6181                  * If the counter action is shared by the indirect action
6182                  * API, the atomic operation decrements its reference
6183                  * counter; if the action is still referenced afterwards,
6184                  * the function returns here and does not release it.
6185                  * When the counter is shared neither by ID nor by the
6186                  * indirect action API, the refcnt is 1 before the
6187                  * decrement, so the condition fails and we fall through.
6188                  */
6189                 if (!IS_LEGACY_SHARED_CNT(counter) &&
6190                     __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6191                                        __ATOMIC_RELAXED))
6192                         return;
6193         }
6194         cnt->pool = pool;
6195         /*
6196          * Put the counter back to the list to be updated in non-fallback
6197          * mode. Two lists are used alternately: while one is in query,
6198          * freed counters are added to the other, selected by the pool
6199          * query_gen value. After the query finishes, that list is added
6200          * to the global container counter list. The lists swap when a
6201          * query starts, so the query callback and this release path
6202          * always operate on different lists and no lock is needed.
6203          */
6204         if (!priv->sh->cmng.counter_fallback) {
6205                 rte_spinlock_lock(&pool->csl);
6206                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6207                 rte_spinlock_unlock(&pool->csl);
6208         } else {
6209                 cnt->dcs_when_free = cnt->dcs_when_active;
6210                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6211                                            MLX5_COUNTER_TYPE_ORIGIN;
6212                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6213                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6214                                   cnt, next);
6215                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6216         }
6217 }
6218
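/*
 * A sketch of the query_gen ping-pong described above, assuming the
 * batch-query code (outside this section) advances pool->query_gen
 * when a new query starts and later drains the list that earlier
 * releases were feeding:
 *
 *   release (here): TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen],
 *                                     cnt, next);
 *   query side:     starts a query, advances query_gen, and on
 *                   completion moves the previously filled list to the
 *                   global free list.
 *
 * The two sides thus touch different list heads, which is why only the
 * short pool->csl spinlock is taken on the release path.
 */
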
6219 /**
6220  * Resize a meter id container.
6221  *
6222  * @param[in] dev
6223  *   Pointer to the Ethernet device structure.
6224  *
6225  * @return
6226  *   0 on success, otherwise negative errno value and rte_errno is set.
6227  */
6228 static int
6229 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6230 {
6231         struct mlx5_priv *priv = dev->data->dev_private;
6232         struct mlx5_aso_mtr_pools_mng *pools_mng =
6233                                 &priv->sh->mtrmng->pools_mng;
6234         void *old_pools = pools_mng->pools;
6235         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6236         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6237         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6238
6239         if (!pools) {
6240                 rte_errno = ENOMEM;
6241                 return -ENOMEM;
6242         }
6243         if (!pools_mng->n &&
6244             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6245                 mlx5_free(pools);
6246                 return -ENOMEM;
6247         }
6248         if (old_pools)
6249                 memcpy(pools, old_pools, pools_mng->n *
6250                                        sizeof(struct mlx5_aso_mtr_pool *));
6251         pools_mng->n = resize;
6252         pools_mng->pools = pools;
6253         if (old_pools)
6254                 mlx5_free(old_pools);
6255         return 0;
6256 }
6257
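/*
 * The resize above grows the pool pointer array by a fixed step
 * (MLX5_MTRS_CONTAINER_RESIZE) instead of one slot per pool:
 *
 *   n: 0 -> STEP -> 2 * STEP -> 3 * STEP -> ...
 *
 * Each step costs one allocation plus a memcpy() of the old pointer
 * array, so the copy is paid once per STEP pool creations rather than
 * on every one. The ASO queue is initialized lazily on the very first
 * resize, when pools_mng->n is still 0.
 */
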
6258 /**
6259  * Prepare a new meter and/or a new meter pool.
6260  *
6261  * @param[in] dev
6262  *   Pointer to the Ethernet device structure.
6263  * @param[out] mtr_free
6264  *   Where to put the pointer to a new meter.
6265  *
6266  * @return
6267  *   The meter pool pointer, and @p mtr_free is set on success;
6268  *   NULL otherwise and rte_errno is set.
6269  */
6270 static struct mlx5_aso_mtr_pool *
6271 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6272                              struct mlx5_aso_mtr **mtr_free)
6273 {
6274         struct mlx5_priv *priv = dev->data->dev_private;
6275         struct mlx5_aso_mtr_pools_mng *pools_mng =
6276                                 &priv->sh->mtrmng->pools_mng;
6277         struct mlx5_aso_mtr_pool *pool = NULL;
6278         struct mlx5_devx_obj *dcs = NULL;
6279         uint32_t i;
6280         uint32_t log_obj_size;
6281
6282         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6283         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6284                         priv->sh->pdn, log_obj_size);
6285         if (!dcs) {
6286                 rte_errno = ENODATA;
6287                 return NULL;
6288         }
6289         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6290         if (!pool) {
6291                 rte_errno = ENOMEM;
6292                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6293                 return NULL;
6294         }
6295         pool->devx_obj = dcs;
6296         pool->index = pools_mng->n_valid;
6297         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6298                 mlx5_free(pool);
6299                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6300                 return NULL;
6301         }
6302         pools_mng->pools[pool->index] = pool;
6303         pools_mng->n_valid++;
6304         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6305                 pool->mtrs[i].offset = i;
6306                 LIST_INSERT_HEAD(&pools_mng->meters,
6307                                  &pool->mtrs[i], next);
6308         }
6309         pool->mtrs[0].offset = 0;
6310         *mtr_free = &pool->mtrs[0];
6311         return pool;
6312 }
6313
6314 /**
6315  * Release a flow meter into the pool.
6316  *
6317  * @param[in] dev
6318  *   Pointer to the Ethernet device structure.
6319  * @param[in] mtr_idx
6320  *   Index to the ASO flow meter.
6321  */
6322 static void
6323 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6324 {
6325         struct mlx5_priv *priv = dev->data->dev_private;
6326         struct mlx5_aso_mtr_pools_mng *pools_mng =
6327                                 &priv->sh->mtrmng->pools_mng;
6328         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6329
6330         MLX5_ASSERT(aso_mtr);
6331         rte_spinlock_lock(&pools_mng->mtrsl);
6332         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6333         aso_mtr->state = ASO_METER_FREE;
6334         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6335         rte_spinlock_unlock(&pools_mng->mtrsl);
6336 }
6337
6338 /**
6339  * Allocate an ASO flow meter.
6340  *
6341  * @param[in] dev
6342  *   Pointer to the Ethernet device structure.
6343  *
6344  * @return
6345  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6346  */
6347 static uint32_t
6348 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6349 {
6350         struct mlx5_priv *priv = dev->data->dev_private;
6351         struct mlx5_aso_mtr *mtr_free = NULL;
6352         struct mlx5_aso_mtr_pools_mng *pools_mng =
6353                                 &priv->sh->mtrmng->pools_mng;
6354         struct mlx5_aso_mtr_pool *pool;
6355         uint32_t mtr_idx = 0;
6356
6357         if (!priv->config.devx) {
6358                 rte_errno = ENOTSUP;
6359                 return 0;
6360         }
6361         /* Get a free meter from the management list. */
6362         /* A new pool is created below if the list is empty. */
6363         rte_spinlock_lock(&pools_mng->mtrsl);
6364         mtr_free = LIST_FIRST(&pools_mng->meters);
6365         if (mtr_free)
6366                 LIST_REMOVE(mtr_free, next);
6367         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6368                 rte_spinlock_unlock(&pools_mng->mtrsl);
6369                 return 0;
6370         }
6371         mtr_free->state = ASO_METER_WAIT;
6372         rte_spinlock_unlock(&pools_mng->mtrsl);
6373         pool = container_of(mtr_free,
6374                         struct mlx5_aso_mtr_pool,
6375                         mtrs[mtr_free->offset]);
6376         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6377         if (!mtr_free->fm.meter_action) {
6378 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6379                 struct rte_flow_error error;
6380                 uint8_t reg_id;
6381
6382                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
6383                 mtr_free->fm.meter_action =
6384                         mlx5_glue->dv_create_flow_action_aso
6385                                                 (priv->sh->rx_domain,
6386                                                  pool->devx_obj->obj,
6387                                                  mtr_free->offset,
6388                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6389                                                  reg_id - REG_C_0);
6390 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6391                 if (!mtr_free->fm.meter_action) {
6392                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6393                         return 0;
6394                 }
6395         }
6396         return mtr_idx;
6397 }
6398
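/*
 * The container_of() call above recovers the owning pool from a meter
 * element without storing a back-pointer: each element records its own
 * array offset, so the pool base is the element address minus the
 * offset of mtrs[offset]. A minimal sketch of the idiom, with
 * hypothetical types that are not part of the driver:
 *
 *   struct elt { uint16_t offset; };
 *   struct pool { struct elt elts[64]; };
 *
 *   struct elt *e = ...;  (taken from a free list)
 *   struct pool *p = container_of(e, struct pool, elts[e->offset]);
 *
 * This is why flow_dv_mtr_pool_create() sets mtrs[i].offset = i before
 * publishing the elements on the free list.
 */
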
6399 /**
6400  * Verify the @p attributes will be correctly understood by the NIC and
6401  * determine whether the rule targets a root or a non-root table.
6402  *
6403  * @param[in] dev
6404  *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel offload descriptor, or NULL.
6405  * @param[in] attributes
6406  *   Pointer to flow attributes.
6407  * @param[in] grp_info
6408  *   Pointer to flow group translation information.
6409  * @param[out] error
6410  *   Pointer to error structure.
6411  *
6412  * @return
6413  *   - 0 on success, for a non-root table.
6414  *   - 1 on success, for a root table.
6415  *   - a negative errno value otherwise and rte_errno is set.
6416  */
6417 static int
6418 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6419                             const struct mlx5_flow_tunnel *tunnel,
6420                             const struct rte_flow_attr *attributes,
6421                             const struct flow_grp_info *grp_info,
6422                             struct rte_flow_error *error)
6423 {
6424         struct mlx5_priv *priv = dev->data->dev_private;
6425         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6426         int ret = 0;
6427
6428 #ifndef HAVE_MLX5DV_DR
6429         RTE_SET_USED(tunnel);
6430         RTE_SET_USED(grp_info);
6431         if (attributes->group)
6432                 return rte_flow_error_set(error, ENOTSUP,
6433                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6434                                           NULL,
6435                                           "groups are not supported");
6436 #else
6437         uint32_t table = 0;
6438
6439         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6440                                        grp_info, error);
6441         if (ret)
6442                 return ret;
6443         if (!table)
6444                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6445 #endif
6446         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6447             attributes->priority > lowest_priority)
6448                 return rte_flow_error_set(error, ENOTSUP,
6449                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6450                                           NULL,
6451                                           "priority out of range");
6452         if (attributes->transfer) {
6453                 if (!priv->config.dv_esw_en)
6454                         return rte_flow_error_set
6455                                 (error, ENOTSUP,
6456                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6457                                  "E-Switch dr is not supported");
6458                 if (!(priv->representor || priv->master))
6459                         return rte_flow_error_set
6460                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6461                                  NULL, "E-Switch configuration can only be"
6462                                  " done by a master or a representor device");
6463                 if (attributes->egress)
6464                         return rte_flow_error_set
6465                                 (error, ENOTSUP,
6466                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6467                                  "egress is not supported");
6468         }
6469         if (!(attributes->egress ^ attributes->ingress))
6470                 return rte_flow_error_set(error, ENOTSUP,
6471                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6472                                           "must specify exactly one of "
6473                                           "ingress or egress");
6474         return ret;
6475 }
6476
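/*
 * Callers depend on the tri-state return value documented above; the
 * pattern used by flow_dv_validate() below is:
 *
 *   ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info,
 *                                     error);
 *   if (ret < 0)
 *           return ret;               (invalid attributes)
 *   is_root = (uint64_t)ret;          (1 for root table, 0 otherwise)
 *
 * Root tables generally go through a more restricted steering path,
 * hence the distinct MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL marker value.
 */
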
6477 static uint16_t
6478 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6479                           const struct rte_flow_item *end)
6480 {
6481         const struct rte_flow_item *item = *head;
6482         uint16_t l3_protocol;
6483
6484         for (; item != end; item++) {
6485                 switch (item->type) {
6486                 default:
6487                         break;
6488                 case RTE_FLOW_ITEM_TYPE_IPV4:
6489                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6490                         goto l3_ok;
6491                 case RTE_FLOW_ITEM_TYPE_IPV6:
6492                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6493                         goto l3_ok;
6494                 case RTE_FLOW_ITEM_TYPE_ETH:
6495                         if (item->mask && item->spec) {
6496                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6497                                                             type, item,
6498                                                             l3_protocol);
6499                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6500                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6501                                         goto l3_ok;
6502                         }
6503                         break;
6504                 case RTE_FLOW_ITEM_TYPE_VLAN:
6505                         if (item->mask && item->spec) {
6506                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6507                                                             inner_type, item,
6508                                                             l3_protocol);
6509                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6510                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6511                                         goto l3_ok;
6512                         }
6513                         break;
6514                 }
6515         }
6516         return 0;
6517 l3_ok:
6518         *head = item;
6519         return l3_protocol;
6520 }
6521
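/*
 * Both locate helpers AND the spec with the mask before trusting a
 * protocol value: a header field pins the next protocol only to the
 * extent the rule actually matches on it. For the IPv4 next_proto_id
 * case handled in mlx5_flow_locate_proto_l4() below:
 *
 *   spec->hdr.next_proto_id = IPPROTO_UDP;  (17)
 *   mask->hdr.next_proto_id = 0xff;   full match: AND keeps 17 (UDP)
 *   mask->hdr.next_proto_id = 0x00;   wildcard:   AND yields 0, no hit
 *
 * The AND result is then compared against IPPROTO_TCP/IPPROTO_UDP, so
 * only masked-in bits can ever identify the protocol.
 */
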
6522 static uint8_t
6523 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6524                           const struct rte_flow_item *end)
6525 {
6526         const struct rte_flow_item *item = *head;
6527         uint8_t l4_protocol;
6528
6529         for (; item != end; item++) {
6530                 switch (item->type) {
6531                 default:
6532                         break;
6533                 case RTE_FLOW_ITEM_TYPE_TCP:
6534                         l4_protocol = IPPROTO_TCP;
6535                         goto l4_ok;
6536                 case RTE_FLOW_ITEM_TYPE_UDP:
6537                         l4_protocol = IPPROTO_UDP;
6538                         goto l4_ok;
6539                 case RTE_FLOW_ITEM_TYPE_IPV4:
6540                         if (item->mask && item->spec) {
6541                                 const struct rte_flow_item_ipv4 *mask, *spec;
6542
6543                                 mask = (typeof(mask))item->mask;
6544                                 spec = (typeof(spec))item->spec;
6545                                 l4_protocol = mask->hdr.next_proto_id &
6546                                               spec->hdr.next_proto_id;
6547                                 if (l4_protocol == IPPROTO_TCP ||
6548                                     l4_protocol == IPPROTO_UDP)
6549                                         goto l4_ok;
6550                         }
6551                         break;
6552                 case RTE_FLOW_ITEM_TYPE_IPV6:
6553                         if (item->mask && item->spec) {
6554                                 const struct rte_flow_item_ipv6 *mask, *spec;
6555                                 mask = (typeof(mask))item->mask;
6556                                 spec = (typeof(spec))item->spec;
6557                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6558                                 if (l4_protocol == IPPROTO_TCP ||
6559                                     l4_protocol == IPPROTO_UDP)
6560                                         goto l4_ok;
6561                         }
6562                         break;
6563                 }
6564         }
6565         return 0;
6566 l4_ok:
6567         *head = item;
6568         return l4_protocol;
6569 }
6570
6571 static int
6572 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6573                                 const struct rte_flow_item *rule_items,
6574                                 const struct rte_flow_item *integrity_item,
6575                                 struct rte_flow_error *error)
6576 {
6577         struct mlx5_priv *priv = dev->data->dev_private;
6578         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6579         const struct rte_flow_item_integrity *mask = (typeof(mask))
6580                                                      integrity_item->mask;
6581         const struct rte_flow_item_integrity *spec = (typeof(spec))
6582                                                      integrity_item->spec;
6583         uint32_t protocol;
6584
6585         if (!priv->config.hca_attr.pkt_integrity_match)
6586                 return rte_flow_error_set(error, ENOTSUP,
6587                                           RTE_FLOW_ERROR_TYPE_ITEM,
6588                                           integrity_item,
6589                                           "packet integrity match not supported");
6590         if (!mask)
6591                 mask = &rte_flow_item_integrity_mask;
6592         if (!mlx5_validate_integrity_item(mask))
6593                 return rte_flow_error_set(error, ENOTSUP,
6594                                           RTE_FLOW_ERROR_TYPE_ITEM,
6595                                           integrity_item,
6596                                           "unsupported integrity filter");
         if (!spec)
                 return rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           integrity_item,
                                           "integrity item spec is mandatory");
6597         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6598         if (spec->level > 1) {
6599                 if (!tunnel_item)
6600                         return rte_flow_error_set(error, ENOTSUP,
6601                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6602                                                   integrity_item,
6603                                                   "missing tunnel item");
6604                 item = tunnel_item;
6605                 end_item = mlx5_find_end_item(tunnel_item);
6606         } else {
6607                 end_item = tunnel_item ? tunnel_item :
6608                            mlx5_find_end_item(integrity_item);
6609         }
6610         if (mask->l3_ok || mask->ipv4_csum_ok) {
6611                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6612                 if (!protocol)
6613                         return rte_flow_error_set(error, EINVAL,
6614                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6615                                                   integrity_item,
6616                                                   "missing L3 protocol");
6617         }
6618         if (mask->l4_ok || mask->l4_csum_ok) {
6619                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6620                 if (!protocol)
6621                         return rte_flow_error_set(error, EINVAL,
6622                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6623                                                   integrity_item,
6624                                                   "missing L4 protocol");
6625         }
6626         return 0;
6627 }
6628
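/*
 * A hypothetical pattern accepted by the validation above (outer
 * integrity check, level <= 1, with the L3/L4 items that the
 * l3_ok/l4_ok flags require):
 *
 *   struct rte_flow_item_integrity integrity = {
 *           .level = 0,
 *           .l3_ok = 1,
 *           .ipv4_csum_ok = 1,
 *           .l4_ok = 1,
 *           .l4_csum_ok = 1,
 *   };
 *   pattern: ETH / IPV4 / UDP / INTEGRITY(spec = &integrity) / END
 *
 * With .level > 1 a tunnel item must be present and the L3/L4 search
 * restarts from that tunnel item, i.e. the inner headers are checked.
 */
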
6629 /**
6630  * Internal validation function. For validating both actions and items.
6631  *
6632  * @param[in] dev
6633  *   Pointer to the rte_eth_dev structure.
6634  * @param[in] attr
6635  *   Pointer to the flow attributes.
6636  * @param[in] items
6637  *   Pointer to the list of items.
6638  * @param[in] actions
6639  *   Pointer to the list of actions.
6640  * @param[in] external
6641  *   This flow rule is created by a request external to the PMD.
6642  * @param[in] hairpin
6643  *   Number of hairpin TX actions, 0 means classic flow.
6644  * @param[out] error
6645  *   Pointer to the error structure.
6646  *
6647  * @return
6648  *   0 on success, a negative errno value otherwise and rte_errno is set.
6649  */
6650 static int
6651 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6652                  const struct rte_flow_item items[],
6653                  const struct rte_flow_action actions[],
6654                  bool external, int hairpin, struct rte_flow_error *error)
6655 {
6656         int ret;
6657         uint64_t action_flags = 0;
6658         uint64_t item_flags = 0;
6659         uint64_t last_item = 0;
6660         uint8_t next_protocol = 0xff;
6661         uint16_t ether_type = 0;
6662         int actions_n = 0;
6663         uint8_t item_ipv6_proto = 0;
6664         int fdb_mirror_limit = 0;
6665         int modify_after_mirror = 0;
6666         const struct rte_flow_item *geneve_item = NULL;
6667         const struct rte_flow_item *gre_item = NULL;
6668         const struct rte_flow_item *gtp_item = NULL;
6669         const struct rte_flow_action_raw_decap *decap;
6670         const struct rte_flow_action_raw_encap *encap;
6671         const struct rte_flow_action_rss *rss = NULL;
6672         const struct rte_flow_action_rss *sample_rss = NULL;
6673         const struct rte_flow_action_count *sample_count = NULL;
6674         const struct rte_flow_item_tcp nic_tcp_mask = {
6675                 .hdr = {
6676                         .tcp_flags = 0xFF,
6677                         .src_port = RTE_BE16(UINT16_MAX),
6678                         .dst_port = RTE_BE16(UINT16_MAX),
6679                 }
6680         };
6681         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6682                 .hdr = {
6683                         .src_addr =
6684                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6685                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6686                         .dst_addr =
6687                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6688                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6689                         .vtc_flow = RTE_BE32(0xffffffff),
6690                         .proto = 0xff,
6691                         .hop_limits = 0xff,
6692                 },
6693                 .has_frag_ext = 1,
6694         };
6695         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6696                 .hdr = {
6697                         .common = {
6698                                 .u32 =
6699                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6700                                         .type = 0xFF,
6701                                         }).u32),
6702                         },
6703                         .dummy[0] = 0xffffffff,
6704                 },
6705         };
6706         struct mlx5_priv *priv = dev->data->dev_private;
6707         struct mlx5_dev_config *dev_conf = &priv->config;
6708         uint16_t queue_index = 0xFFFF;
6709         const struct rte_flow_item_vlan *vlan_m = NULL;
6710         uint32_t rw_act_num = 0;
6711         uint64_t is_root;
6712         const struct mlx5_flow_tunnel *tunnel;
6713         enum mlx5_tof_rule_type tof_rule_type;
6714         struct flow_grp_info grp_info = {
6715                 .external = !!external,
6716                 .transfer = !!attr->transfer,
6717                 .fdb_def_rule = !!priv->fdb_def_rule,
6718                 .std_tbl_fix = true,
6719         };
6720         const struct rte_eth_hairpin_conf *conf;
6721         const struct rte_flow_item *rule_items = items;
6722         const struct rte_flow_item *port_id_item = NULL;
6723         bool def_policy = false;
6724
6725         if (items == NULL)
6726                 return -1;
6727         tunnel = is_tunnel_offload_active(dev) ?
6728                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6729         if (tunnel) {
6730                 if (priv->representor)
6731                         return rte_flow_error_set
6732                                 (error, ENOTSUP,
6733                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6734                                  NULL, "decap not supported for VF representor");
6735                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6736                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6737                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6738                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6739                                         MLX5_FLOW_ACTION_DECAP;
6740                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6741                                         (dev, attr, tunnel, tof_rule_type);
6742         }
6743         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6744         if (ret < 0)
6745                 return ret;
6746         is_root = (uint64_t)ret;
6747         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6748                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6749                 int type = items->type;
6750
6751                 if (!mlx5_flow_os_item_supported(type))
6752                         return rte_flow_error_set(error, ENOTSUP,
6753                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6754                                                   NULL, "item not supported");
6755                 switch (type) {
6756                 case RTE_FLOW_ITEM_TYPE_VOID:
6757                         break;
6758                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6759                         ret = flow_dv_validate_item_port_id
6760                                         (dev, items, attr, item_flags, error);
6761                         if (ret < 0)
6762                                 return ret;
6763                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6764                         port_id_item = items;
6765                         break;
6766                 case RTE_FLOW_ITEM_TYPE_ETH:
6767                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6768                                                           true, error);
6769                         if (ret < 0)
6770                                 return ret;
6771                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6772                                              MLX5_FLOW_LAYER_OUTER_L2;
6773                         if (items->mask != NULL && items->spec != NULL) {
6774                                 ether_type =
6775                                         ((const struct rte_flow_item_eth *)
6776                                          items->spec)->type;
6777                                 ether_type &=
6778                                         ((const struct rte_flow_item_eth *)
6779                                          items->mask)->type;
6780                                 ether_type = rte_be_to_cpu_16(ether_type);
6781                         } else {
6782                                 ether_type = 0;
6783                         }
6784                         break;
6785                 case RTE_FLOW_ITEM_TYPE_VLAN:
6786                         ret = flow_dv_validate_item_vlan(items, item_flags,
6787                                                          dev, error);
6788                         if (ret < 0)
6789                                 return ret;
6790                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6791                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6792                         if (items->mask != NULL && items->spec != NULL) {
6793                                 ether_type =
6794                                         ((const struct rte_flow_item_vlan *)
6795                                          items->spec)->inner_type;
6796                                 ether_type &=
6797                                         ((const struct rte_flow_item_vlan *)
6798                                          items->mask)->inner_type;
6799                                 ether_type = rte_be_to_cpu_16(ether_type);
6800                         } else {
6801                                 ether_type = 0;
6802                         }
6803                         /* Store outer VLAN mask for of_push_vlan action. */
6804                         if (!tunnel)
6805                                 vlan_m = items->mask;
6806                         break;
6807                 case RTE_FLOW_ITEM_TYPE_IPV4:
6808                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6809                                                   &item_flags, &tunnel);
6810                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6811                                                          last_item, ether_type,
6812                                                          error);
6813                         if (ret < 0)
6814                                 return ret;
6815                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6816                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6817                         if (items->mask != NULL &&
6818                             ((const struct rte_flow_item_ipv4 *)
6819                              items->mask)->hdr.next_proto_id) {
6820                                 next_protocol =
6821                                         ((const struct rte_flow_item_ipv4 *)
6822                                          (items->spec))->hdr.next_proto_id;
6823                                 next_protocol &=
6824                                         ((const struct rte_flow_item_ipv4 *)
6825                                          (items->mask))->hdr.next_proto_id;
6826                         } else {
6827                                 /* Reset for inner layer. */
6828                                 next_protocol = 0xff;
6829                         }
6830                         break;
6831                 case RTE_FLOW_ITEM_TYPE_IPV6:
6832                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6833                                                   &item_flags, &tunnel);
6834                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6835                                                            last_item,
6836                                                            ether_type,
6837                                                            &nic_ipv6_mask,
6838                                                            error);
6839                         if (ret < 0)
6840                                 return ret;
6841                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6842                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6843                         if (items->mask != NULL &&
6844                             ((const struct rte_flow_item_ipv6 *)
6845                              items->mask)->hdr.proto) {
6846                                 item_ipv6_proto =
6847                                         ((const struct rte_flow_item_ipv6 *)
6848                                          items->spec)->hdr.proto;
6849                                 next_protocol =
6850                                         ((const struct rte_flow_item_ipv6 *)
6851                                          items->spec)->hdr.proto;
6852                                 next_protocol &=
6853                                         ((const struct rte_flow_item_ipv6 *)
6854                                          items->mask)->hdr.proto;
6855                         } else {
6856                                 /* Reset for inner layer. */
6857                                 next_protocol = 0xff;
6858                         }
6859                         break;
6860                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6861                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6862                                                                   item_flags,
6863                                                                   error);
6864                         if (ret < 0)
6865                                 return ret;
6866                         last_item = tunnel ?
6867                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6868                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6869                         if (items->mask != NULL &&
6870                             ((const struct rte_flow_item_ipv6_frag_ext *)
6871                              items->mask)->hdr.next_header) {
6872                                 next_protocol =
6873                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6874                                  items->spec)->hdr.next_header;
6875                                 next_protocol &=
6876                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6877                                  items->mask)->hdr.next_header;
6878                         } else {
6879                                 /* Reset for inner layer. */
6880                                 next_protocol = 0xff;
6881                         }
6882                         break;
6883                 case RTE_FLOW_ITEM_TYPE_TCP:
6884                         ret = mlx5_flow_validate_item_tcp
6885                                                 (items, item_flags,
6886                                                  next_protocol,
6887                                                  &nic_tcp_mask,
6888                                                  error);
6889                         if (ret < 0)
6890                                 return ret;
6891                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6892                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6893                         break;
6894                 case RTE_FLOW_ITEM_TYPE_UDP:
6895                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6896                                                           next_protocol,
6897                                                           error);
6898                         if (ret < 0)
6899                                 return ret;
6900                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6901                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6902                         break;
6903                 case RTE_FLOW_ITEM_TYPE_GRE:
6904                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6905                                                           next_protocol, error);
6906                         if (ret < 0)
6907                                 return ret;
6908                         gre_item = items;
6909                         last_item = MLX5_FLOW_LAYER_GRE;
6910                         break;
6911                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6912                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6913                                                             next_protocol,
6914                                                             error);
6915                         if (ret < 0)
6916                                 return ret;
6917                         last_item = MLX5_FLOW_LAYER_NVGRE;
6918                         break;
6919                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6920                         ret = mlx5_flow_validate_item_gre_key
6921                                 (items, item_flags, gre_item, error);
6922                         if (ret < 0)
6923                                 return ret;
6924                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6925                         break;
6926                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6927                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
6928                                                             error);
6929                         if (ret < 0)
6930                                 return ret;
6931                         last_item = MLX5_FLOW_LAYER_VXLAN;
6932                         break;
6933                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6934                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
6935                                                                 item_flags, dev,
6936                                                                 error);
6937                         if (ret < 0)
6938                                 return ret;
6939                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6940                         break;
6941                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6942                         ret = mlx5_flow_validate_item_geneve(items,
6943                                                              item_flags, dev,
6944                                                              error);
6945                         if (ret < 0)
6946                                 return ret;
6947                         geneve_item = items;
6948                         last_item = MLX5_FLOW_LAYER_GENEVE;
6949                         break;
6950                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6951                         ret = mlx5_flow_validate_item_geneve_opt(items,
6952                                                                  last_item,
6953                                                                  geneve_item,
6954                                                                  dev,
6955                                                                  error);
6956                         if (ret < 0)
6957                                 return ret;
6958                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
6959                         break;
6960                 case RTE_FLOW_ITEM_TYPE_MPLS:
6961                         ret = mlx5_flow_validate_item_mpls(dev, items,
6962                                                            item_flags,
6963                                                            last_item, error);
6964                         if (ret < 0)
6965                                 return ret;
6966                         last_item = MLX5_FLOW_LAYER_MPLS;
6967                         break;
6969                 case RTE_FLOW_ITEM_TYPE_MARK:
6970                         ret = flow_dv_validate_item_mark(dev, items, attr,
6971                                                          error);
6972                         if (ret < 0)
6973                                 return ret;
6974                         last_item = MLX5_FLOW_ITEM_MARK;
6975                         break;
6976                 case RTE_FLOW_ITEM_TYPE_META:
6977                         ret = flow_dv_validate_item_meta(dev, items, attr,
6978                                                          error);
6979                         if (ret < 0)
6980                                 return ret;
6981                         last_item = MLX5_FLOW_ITEM_METADATA;
6982                         break;
6983                 case RTE_FLOW_ITEM_TYPE_ICMP:
6984                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
6985                                                            next_protocol,
6986                                                            error);
6987                         if (ret < 0)
6988                                 return ret;
6989                         last_item = MLX5_FLOW_LAYER_ICMP;
6990                         break;
6991                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6992                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
6993                                                             next_protocol,
6994                                                             error);
6995                         if (ret < 0)
6996                                 return ret;
6997                         item_ipv6_proto = IPPROTO_ICMPV6;
6998                         last_item = MLX5_FLOW_LAYER_ICMP6;
6999                         break;
7000                 case RTE_FLOW_ITEM_TYPE_TAG:
7001                         ret = flow_dv_validate_item_tag(dev, items,
7002                                                         attr, error);
7003                         if (ret < 0)
7004                                 return ret;
7005                         last_item = MLX5_FLOW_ITEM_TAG;
7006                         break;
7007                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7008                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7009                         break;
7010                 case RTE_FLOW_ITEM_TYPE_GTP:
7011                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7012                                                         error);
7013                         if (ret < 0)
7014                                 return ret;
7015                         gtp_item = items;
7016                         last_item = MLX5_FLOW_LAYER_GTP;
7017                         break;
7018                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7019                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7020                                                             gtp_item, attr,
7021                                                             error);
7022                         if (ret < 0)
7023                                 return ret;
7024                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7025                         break;
7026                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7027                         /* Capacity will be checked in the translate stage. */
7028                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7029                                                             last_item,
7030                                                             ether_type,
7031                                                             &nic_ecpri_mask,
7032                                                             error);
7033                         if (ret < 0)
7034                                 return ret;
7035                         last_item = MLX5_FLOW_LAYER_ECPRI;
7036                         break;
7037                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7038                         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
7039                                 return rte_flow_error_set
7040                                         (error, ENOTSUP,
7041                                          RTE_FLOW_ERROR_TYPE_ITEM,
7042                                          NULL, "multiple integrity items not supported");
7043                         ret = flow_dv_validate_item_integrity(dev, rule_items,
7044                                                               items, error);
7045                         if (ret < 0)
7046                                 return ret;
7047                         last_item = MLX5_FLOW_ITEM_INTEGRITY;
7048                         break;
7049                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7050                         ret = flow_dv_validate_item_aso_ct(dev, items,
7051                                                            &item_flags, error);
7052                         if (ret < 0)
7053                                 return ret;
7054                         break;
7055                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7056                         /* The tunnel offload item was processed before;
7057                          * it is listed here as a supported type.
7058                          */
7059                         break;
7060                 default:
7061                         return rte_flow_error_set(error, ENOTSUP,
7062                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7063                                                   NULL, "item not supported");
7064                 }
7065                 item_flags |= last_item;
7066         }
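        /*
         * At this point item_flags holds one MLX5_FLOW_* bit per matched
         * layer: every case above sets last_item to a single bit and the
         * OR at the loop bottom accumulates it. For ETH / IPV4 / UDP /
         * VXLAN the flags build up as
         *
         *   OUTER_L2 | OUTER_L3_IPV4 | OUTER_L4_UDP | VXLAN
         *
         * and the per-iteration tunnel flag flips once any
         * MLX5_FLOW_LAYER_TUNNEL bit (such as VXLAN) appears.
         */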
7067         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7068                 int type = actions->type;
7069                 bool shared_count = false;
7070
7071                 if (!mlx5_flow_os_action_supported(type))
7072                         return rte_flow_error_set(error, ENOTSUP,
7073                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7074                                                   actions,
7075                                                   "action not supported");
7076                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7077                         return rte_flow_error_set(error, ENOTSUP,
7078                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7079                                                   actions, "too many actions");
7080                 if (action_flags &
7081                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7082                         return rte_flow_error_set(error, ENOTSUP,
7083                                 RTE_FLOW_ERROR_TYPE_ACTION,
7084                                 NULL, "meter action with policy "
7085                                 "must be the last action");
7086                 switch (type) {
7087                 case RTE_FLOW_ACTION_TYPE_VOID:
7088                         break;
7089                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7090                         ret = flow_dv_validate_action_port_id(dev,
7091                                                               action_flags,
7092                                                               actions,
7093                                                               attr,
7094                                                               error);
7095                         if (ret)
7096                                 return ret;
7097                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7098                         ++actions_n;
7099                         break;
7100                 case RTE_FLOW_ACTION_TYPE_FLAG:
7101                         ret = flow_dv_validate_action_flag(dev, action_flags,
7102                                                            attr, error);
7103                         if (ret < 0)
7104                                 return ret;
7105                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7106                                 /* Count all modify-header actions as one. */
7107                                 if (!(action_flags &
7108                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7109                                         ++actions_n;
7110                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7111                                                 MLX5_FLOW_ACTION_MARK_EXT;
7112                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7113                                         modify_after_mirror = 1;
7115                         } else {
7116                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7117                                 ++actions_n;
7118                         }
7119                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7120                         break;
7121                 case RTE_FLOW_ACTION_TYPE_MARK:
7122                         ret = flow_dv_validate_action_mark(dev, actions,
7123                                                            action_flags,
7124                                                            attr, error);
7125                         if (ret < 0)
7126                                 return ret;
7127                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7128                                 /* Count all modify-header actions as one. */
7129                                 if (!(action_flags &
7130                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7131                                         ++actions_n;
7132                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7133                                                 MLX5_FLOW_ACTION_MARK_EXT;
7134                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7135                                         modify_after_mirror = 1;
7136                         } else {
7137                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7138                                 ++actions_n;
7139                         }
7140                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7141                         break;
7142                 case RTE_FLOW_ACTION_TYPE_SET_META:
7143                         ret = flow_dv_validate_action_set_meta(dev, actions,
7144                                                                action_flags,
7145                                                                attr, error);
7146                         if (ret < 0)
7147                                 return ret;
7148                         /* Count all modify-header actions as one action. */
7149                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7150                                 ++actions_n;
7151                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7152                                 modify_after_mirror = 1;
7153                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7154                         rw_act_num += MLX5_ACT_NUM_SET_META;
7155                         break;
7156                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7157                         ret = flow_dv_validate_action_set_tag(dev, actions,
7158                                                               action_flags,
7159                                                               attr, error);
7160                         if (ret < 0)
7161                                 return ret;
7162                         /* Count all modify-header actions as one action. */
7163                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7164                                 ++actions_n;
7165                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7166                                 modify_after_mirror = 1;
7167                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7168                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7169                         break;
7170                 case RTE_FLOW_ACTION_TYPE_DROP:
7171                         ret = mlx5_flow_validate_action_drop(action_flags,
7172                                                              attr, error);
7173                         if (ret < 0)
7174                                 return ret;
7175                         action_flags |= MLX5_FLOW_ACTION_DROP;
7176                         ++actions_n;
7177                         break;
7178                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7179                         ret = mlx5_flow_validate_action_queue(actions,
7180                                                               action_flags, dev,
7181                                                               attr, error);
7182                         if (ret < 0)
7183                                 return ret;
7184                         queue_index = ((const struct rte_flow_action_queue *)
7185                                                         (actions->conf))->index;
7186                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7187                         ++actions_n;
7188                         break;
7189                 case RTE_FLOW_ACTION_TYPE_RSS:
7190                         rss = actions->conf;
7191                         ret = mlx5_flow_validate_action_rss(actions,
7192                                                             action_flags, dev,
7193                                                             attr, item_flags,
7194                                                             error);
7195                         if (ret < 0)
7196                                 return ret;
7197                         if (rss && sample_rss &&
7198                             (sample_rss->level != rss->level ||
7199                             sample_rss->types != rss->types))
7200                                 return rte_flow_error_set(error, ENOTSUP,
7201                                         RTE_FLOW_ERROR_TYPE_ACTION,
7202                                         NULL,
7203                                         "Can't use different RSS types "
7204                                         "or levels in the same flow");
7205                         if (rss != NULL && rss->queue_num)
7206                                 queue_index = rss->queue[0];
7207                         action_flags |= MLX5_FLOW_ACTION_RSS;
7208                         ++actions_n;
7209                         break;
7210                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7211                         ret =
7212                         mlx5_flow_validate_action_default_miss(action_flags,
7213                                         attr, error);
7214                         if (ret < 0)
7215                                 return ret;
7216                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7217                         ++actions_n;
7218                         break;
7219                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7220                 case RTE_FLOW_ACTION_TYPE_COUNT:
7221                         shared_count = is_shared_action_count(actions);
7222                         ret = flow_dv_validate_action_count(dev, shared_count,
7223                                                             action_flags,
7224                                                             error);
7225                         if (ret < 0)
7226                                 return ret;
7227                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7228                         ++actions_n;
7229                         break;
7230                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7231                         if (flow_dv_validate_action_pop_vlan(dev,
7232                                                              action_flags,
7233                                                              actions,
7234                                                              item_flags, attr,
7235                                                              error))
7236                                 return -rte_errno;
7237                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7238                                 modify_after_mirror = 1;
7239                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7240                         ++actions_n;
7241                         break;
7242                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7243                         ret = flow_dv_validate_action_push_vlan(dev,
7244                                                                 action_flags,
7245                                                                 vlan_m,
7246                                                                 actions, attr,
7247                                                                 error);
7248                         if (ret < 0)
7249                                 return ret;
7250                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7251                                 modify_after_mirror = 1;
7252                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7253                         ++actions_n;
7254                         break;
7255                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7256                         ret = flow_dv_validate_action_set_vlan_pcp
7257                                                 (action_flags, actions, error);
7258                         if (ret < 0)
7259                                 return ret;
7260                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7261                                 modify_after_mirror = 1;
7262                         /* Count PCP with push_vlan command. */
7263                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7264                         break;
7265                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7266                         ret = flow_dv_validate_action_set_vlan_vid
7267                                                 (item_flags, action_flags,
7268                                                  actions, error);
7269                         if (ret < 0)
7270                                 return ret;
7271                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7272                                 modify_after_mirror = 1;
7273                         /* Count VID with push_vlan command. */
7274                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7275                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7276                         break;
7277                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7278                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7279                         ret = flow_dv_validate_action_l2_encap(dev,
7280                                                                action_flags,
7281                                                                actions, attr,
7282                                                                error);
7283                         if (ret < 0)
7284                                 return ret;
7285                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7286                         ++actions_n;
7287                         break;
7288                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7289                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7290                         ret = flow_dv_validate_action_decap(dev, action_flags,
7291                                                             actions, item_flags,
7292                                                             attr, error);
7293                         if (ret < 0)
7294                                 return ret;
7295                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7296                                 modify_after_mirror = 1;
7297                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7298                         ++actions_n;
7299                         break;
7300                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7301                         ret = flow_dv_validate_action_raw_encap_decap
7302                                 (dev, NULL, actions->conf, attr, &action_flags,
7303                                  &actions_n, actions, item_flags, error);
7304                         if (ret < 0)
7305                                 return ret;
7306                         break;
7307                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7308                         decap = actions->conf;
7309                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7310                                 ;
7311                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7312                                 encap = NULL;
7313                                 actions--;
7314                         } else {
7315                                 encap = actions->conf;
7316                         }
7317                         ret = flow_dv_validate_action_raw_encap_decap
7318                                            (dev,
7319                                             decap ? decap : &empty_decap, encap,
7320                                             attr, &action_flags, &actions_n,
7321                                             actions, item_flags, error);
7322                         if (ret < 0)
7323                                 return ret;
7324                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7325                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7326                                 modify_after_mirror = 1;
7327                         break;
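                        /*
                         * A hedged example of the decap/encap pairing handled
                         * above (illustrative only; the header data and sizes
                         * are placeholders): a raw_decap that strips the
                         * outer L2 header, optional VOID actions, then a
                         * raw_encap that pushes a new one. The loop above
                         * skips the VOIDs so the pair is validated together.
                         *
                         *   uint8_t l2[RTE_ETHER_HDR_LEN] = { 0 };
                         *   struct rte_flow_action_raw_decap raw_decap = {
                         *           .size = sizeof(l2),
                         *   };
                         *   struct rte_flow_action_raw_encap raw_encap = {
                         *           .data = l2,
                         *           .size = sizeof(l2),
                         *   };
                         *   struct rte_flow_action acts[] = {
                         *           { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP,
                         *             .conf = &raw_decap },
                         *           { .type = RTE_FLOW_ACTION_TYPE_VOID },
                         *           { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
                         *             .conf = &raw_encap },
                         *           { .type = RTE_FLOW_ACTION_TYPE_END },
                         *   };
                         */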
7328                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7329                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7330                         ret = flow_dv_validate_action_modify_mac(action_flags,
7331                                                                  actions,
7332                                                                  item_flags,
7333                                                                  error);
7334                         if (ret < 0)
7335                                 return ret;
7336                         /* Count all modify-header actions as one action. */
7337                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7338                                 ++actions_n;
7339                         action_flags |= actions->type ==
7340                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7341                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7342                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7343                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7344                                 modify_after_mirror = 1;
7345                         /*
7346                          * Even if the source and destination MAC addresses
7347                          * overlap in the header due to the 4-byte alignment,
7348                          * the convert function handles them separately and
7349                          * creates 4 SW actions in total: 2 actions are added
7350                          * each time, no matter how many address bytes are set.
7351                          */
7352                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7353                         break;
7354                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7355                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7356                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7357                                                                   actions,
7358                                                                   item_flags,
7359                                                                   error);
7360                         if (ret < 0)
7361                                 return ret;
7362                         /* Count all modify-header actions as one action. */
7363                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7364                                 ++actions_n;
7365                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7366                                 modify_after_mirror = 1;
7367                         action_flags |= actions->type ==
7368                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7369                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7370                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7371                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7372                         break;
7373                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7374                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7375                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7376                                                                   actions,
7377                                                                   item_flags,
7378                                                                   error);
7379                         if (ret < 0)
7380                                 return ret;
7381                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7382                                 return rte_flow_error_set(error, ENOTSUP,
7383                                         RTE_FLOW_ERROR_TYPE_ACTION,
7384                                         actions,
7385                                         "Can't change header "
7386                                         "with ICMPv6 proto");
7387                         /* Count all modify-header actions as one action. */
7388                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7389                                 ++actions_n;
7390                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7391                                 modify_after_mirror = 1;
7392                         action_flags |= actions->type ==
7393                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7394                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7395                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7396                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7397                         break;
7398                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7399                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7400                         ret = flow_dv_validate_action_modify_tp(action_flags,
7401                                                                 actions,
7402                                                                 item_flags,
7403                                                                 error);
7404                         if (ret < 0)
7405                                 return ret;
7406                         /* Count all modify-header actions as one action. */
7407                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7408                                 ++actions_n;
7409                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7410                                 modify_after_mirror = 1;
7411                         action_flags |= actions->type ==
7412                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7413                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7414                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7415                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7416                         break;
7417                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7418                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7419                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7420                                                                  actions,
7421                                                                  item_flags,
7422                                                                  error);
7423                         if (ret < 0)
7424                                 return ret;
7425                         /* Count all modify-header actions as one action. */
7426                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7427                                 ++actions_n;
7428                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7429                                 modify_after_mirror = 1;
7430                         action_flags |= actions->type ==
7431                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7432                                                 MLX5_FLOW_ACTION_SET_TTL :
7433                                                 MLX5_FLOW_ACTION_DEC_TTL;
7434                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7435                         break;
7436                 case RTE_FLOW_ACTION_TYPE_JUMP:
7437                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7438                                                            action_flags,
7439                                                            attr, external,
7440                                                            error);
7441                         if (ret)
7442                                 return ret;
7443                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7444                             fdb_mirror_limit)
7445                                 return rte_flow_error_set(error, EINVAL,
7446                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7447                                                   NULL,
7448                                                   "sample and jump action combination is not supported");
7449                         ++actions_n;
7450                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7451                         break;
7452                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7453                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7454                         ret = flow_dv_validate_action_modify_tcp_seq
7455                                                                 (action_flags,
7456                                                                  actions,
7457                                                                  item_flags,
7458                                                                  error);
7459                         if (ret < 0)
7460                                 return ret;
7461                         /* Count all modify-header actions as one action. */
7462                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7463                                 ++actions_n;
7464                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7465                                 modify_after_mirror = 1;
7466                         action_flags |= actions->type ==
7467                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7468                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7469                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7470                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7471                         break;
7472                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7473                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7474                         ret = flow_dv_validate_action_modify_tcp_ack
7475                                                                 (action_flags,
7476                                                                  actions,
7477                                                                  item_flags,
7478                                                                  error);
7479                         if (ret < 0)
7480                                 return ret;
7481                         /* Count all modify-header actions as one action. */
7482                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7483                                 ++actions_n;
7484                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7485                                 modify_after_mirror = 1;
7486                         action_flags |= actions->type ==
7487                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7488                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7489                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7490                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7491                         break;
7492                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7493                         break;
7494                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7495                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7496                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7497                         break;
7498                 case RTE_FLOW_ACTION_TYPE_METER:
7499                         ret = mlx5_flow_validate_action_meter(dev,
7500                                                               action_flags,
7501                                                               actions, attr,
7502                                                               port_id_item,
7503                                                               &def_policy,
7504                                                               error);
7505                         if (ret < 0)
7506                                 return ret;
7507                         action_flags |= MLX5_FLOW_ACTION_METER;
7508                         if (!def_policy)
7509                                 action_flags |=
7510                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7511                         ++actions_n;
7512                         /* Meter action will add one more TAG action. */
7513                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7514                         break;
7515                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7516                         if (!attr->transfer && !attr->group)
7517                                 return rte_flow_error_set(error, ENOTSUP,
7518                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7519                                                 NULL,
7520                                                 "Shared ASO age action is not supported for group 0");
7521                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7522                                 return rte_flow_error_set
7523                                                   (error, EINVAL,
7524                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7525                                                    NULL,
7526                                                    "duplicate age actions set");
7527                         action_flags |= MLX5_FLOW_ACTION_AGE;
7528                         ++actions_n;
7529                         break;
7530                 case RTE_FLOW_ACTION_TYPE_AGE:
7531                         ret = flow_dv_validate_action_age(action_flags,
7532                                                           actions, dev,
7533                                                           error);
7534                         if (ret < 0)
7535                                 return ret;
7536                         /*
7537                          * Validate mutual exclusion of the regular AGE action
7538                          * (using a counter) with shared counter actions.
7539                          */
7540                         if (!priv->sh->flow_hit_aso_en) {
7541                                 if (shared_count)
7542                                         return rte_flow_error_set
7543                                                 (error, EINVAL,
7544                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7545                                                 NULL,
7546                                                 "old age and shared count combination is not supported");
7547                                 if (sample_count)
7548                                         return rte_flow_error_set
7549                                                 (error, EINVAL,
7550                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7551                                                 NULL,
7552                                                 "old age action and count must be in the same sub flow");
7553                         }
7554                         action_flags |= MLX5_FLOW_ACTION_AGE;
7555                         ++actions_n;
7556                         break;
7557                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7558                         ret = flow_dv_validate_action_modify_ipv4_dscp
7559                                                          (action_flags,
7560                                                           actions,
7561                                                           item_flags,
7562                                                           error);
7563                         if (ret < 0)
7564                                 return ret;
7565                         /* Count all modify-header actions as one action. */
7566                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7567                                 ++actions_n;
7568                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7569                                 modify_after_mirror = 1;
7570                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7571                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7572                         break;
7573                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7574                         ret = flow_dv_validate_action_modify_ipv6_dscp
7575                                                                 (action_flags,
7576                                                                  actions,
7577                                                                  item_flags,
7578                                                                  error);
7579                         if (ret < 0)
7580                                 return ret;
7581                         /* Count all modify-header actions as one action. */
7582                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7583                                 ++actions_n;
7584                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7585                                 modify_after_mirror = 1;
7586                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7587                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7588                         break;
7589                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7590                         ret = flow_dv_validate_action_sample(&action_flags,
7591                                                              actions, dev,
7592                                                              attr, item_flags,
7593                                                              rss, &sample_rss,
7594                                                              &sample_count,
7595                                                              &fdb_mirror_limit,
7596                                                              error);
7597                         if (ret < 0)
7598                                 return ret;
7599                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7600                         ++actions_n;
7601                         break;
7602                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7603                         ret = flow_dv_validate_action_modify_field(dev,
7604                                                                    action_flags,
7605                                                                    actions,
7606                                                                    attr,
7607                                                                    error);
7608                         if (ret < 0)
7609                                 return ret;
7610                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7611                                 modify_after_mirror = 1;
7612                         /* Count all modify-header actions as one action. */
7613                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7614                                 ++actions_n;
7615                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7616                         rw_act_num += ret;
7617                         break;
7618                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7619                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7620                                                              item_flags, attr,
7621                                                              error);
7622                         if (ret < 0)
7623                                 return ret;
7624                         action_flags |= MLX5_FLOW_ACTION_CT;
7625                         break;
7626                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7627                         /* Tunnel offload action was processed before;
7628                          * list it here as a supported type.
7629                          */
7630                         break;
7631                 default:
7632                         return rte_flow_error_set(error, ENOTSUP,
7633                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7634                                                   actions,
7635                                                   "action not supported");
7636                 }
7637         }
7638         /*
7639          * Validate actions in tunnel offload flow rules:
7640          * - Explicit decap action is prohibited by the tunnel offload API.
7641          * - Drop action in a tunnel steer rule is prohibited by the API.
7642          * - Application cannot use MARK action because its value can mask
7643          *   the tunnel default miss notification.
7644          * - JUMP in a tunnel match rule is not supported by the current
7645          *   PMD implementation.
7646          * - TAG & META are reserved for future use.
7647          */
7648         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7649                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7650                                             MLX5_FLOW_ACTION_MARK     |
7651                                             MLX5_FLOW_ACTION_SET_TAG  |
7652                                             MLX5_FLOW_ACTION_SET_META |
7653                                             MLX5_FLOW_ACTION_DROP;
7654
7655                 if (action_flags & bad_actions_mask)
7656                         return rte_flow_error_set
7657                                         (error, EINVAL,
7658                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7659                                         "Invalid RTE action in tunnel "
7660                                         "set decap rule");
7661                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7662                         return rte_flow_error_set
7663                                         (error, EINVAL,
7664                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7665                                         "tunnel set decap rule must terminate "
7666                                         "with JUMP");
7667                 if (!attr->ingress)
7668                         return rte_flow_error_set
7669                                         (error, EINVAL,
7670                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7671                                         "tunnel flows for ingress traffic only");
7672         }
7673         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7674                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7675                                             MLX5_FLOW_ACTION_MARK    |
7676                                             MLX5_FLOW_ACTION_SET_TAG |
7677                                             MLX5_FLOW_ACTION_SET_META;
7678
7679                 if (action_flags & bad_actions_mask)
7680                         return rte_flow_error_set
7681                                         (error, EINVAL,
7682                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7683                                         "Invalid RTE action in tunnel "
7684                                         "set match rule");
7685         }
7686         /*
7687          * Validate the drop action mutual exclusion with other actions.
7688          * Drop action is mutually-exclusive with any other action, except for
7689          * Count action.
7690          * Drop action compatibility with tunnel offload was already validated.
7691          */
7692         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7693                             MLX5_FLOW_ACTION_TUNNEL_SET));
7694         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7695             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7696                 return rte_flow_error_set(error, EINVAL,
7697                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7698                                           "Drop action is mutually-exclusive "
7699                                           "with any other action, except for "
7700                                           "Count action");
7701         /* Eswitch has a few restrictions on using items and actions. */
7702         if (attr->transfer) {
7703                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7704                     action_flags & MLX5_FLOW_ACTION_FLAG)
7705                         return rte_flow_error_set(error, ENOTSUP,
7706                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7707                                                   NULL,
7708                                                   "unsupported action FLAG");
7709                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7710                     action_flags & MLX5_FLOW_ACTION_MARK)
7711                         return rte_flow_error_set(error, ENOTSUP,
7712                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7713                                                   NULL,
7714                                                   "unsupported action MARK");
7715                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7716                         return rte_flow_error_set(error, ENOTSUP,
7717                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7718                                                   NULL,
7719                                                   "unsupported action QUEUE");
7720                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7721                         return rte_flow_error_set(error, ENOTSUP,
7722                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7723                                                   NULL,
7724                                                   "unsupported action RSS");
7725                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7726                         return rte_flow_error_set(error, EINVAL,
7727                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7728                                                   actions,
7729                                                   "no fate action is found");
7730         } else {
7731                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7732                         return rte_flow_error_set(error, EINVAL,
7733                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7734                                                   actions,
7735                                                   "no fate action is found");
7736         }
7737         /*
7738          * Continue validation for Xcap and VLAN actions.
7739          * If hairpin is working in explicit TX rule mode, there is no action
7740          * splitting and the validation of a hairpin ingress flow should be
7741          * the same as for other standard flows.
7742          */
7743         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7744                              MLX5_FLOW_VLAN_ACTIONS)) &&
7745             (queue_index == 0xFFFF ||
7746              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7747              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7748              conf->tx_explicit != 0))) {
7749                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7750                     MLX5_FLOW_XCAP_ACTIONS)
7751                         return rte_flow_error_set(error, ENOTSUP,
7752                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7753                                                   NULL, "encap and decap "
7754                                                   "combination is not supported");
7755                 if (!attr->transfer && attr->ingress) {
7756                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7757                                 return rte_flow_error_set
7758                                                 (error, ENOTSUP,
7759                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7760                                                  NULL, "encap is not supported"
7761                                                  " for ingress traffic");
7762                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7763                                 return rte_flow_error_set
7764                                                 (error, ENOTSUP,
7765                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7766                                                  NULL, "push VLAN action not "
7767                                                  "supported for ingress");
7768                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7769                                         MLX5_FLOW_VLAN_ACTIONS)
7770                                 return rte_flow_error_set
7771                                                 (error, ENOTSUP,
7772                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7773                                                  NULL, "no support for "
7774                                                  "multiple VLAN actions");
7775                 }
7776         }
7777         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7778                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7779                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7780                         attr->ingress)
7781                         return rte_flow_error_set
7782                                 (error, ENOTSUP,
7783                                 RTE_FLOW_ERROR_TYPE_ACTION,
7784                                 NULL, "fate action not supported for "
7785                                 "meter with policy");
7786                 if (attr->egress) {
7787                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7788                                 return rte_flow_error_set
7789                                         (error, ENOTSUP,
7790                                         RTE_FLOW_ERROR_TYPE_ACTION,
7791                                         NULL, "modify header action in egress "
7792                                         "cannot be done before meter action");
7793                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7794                                 return rte_flow_error_set
7795                                         (error, ENOTSUP,
7796                                         RTE_FLOW_ERROR_TYPE_ACTION,
7797                                         NULL, "encap action in egress "
7798                                         "cannot be done before meter action");
7799                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7800                                 return rte_flow_error_set
7801                                         (error, ENOTSUP,
7802                                         RTE_FLOW_ERROR_TYPE_ACTION,
7803                                         NULL, "push vlan action in egress "
7804                                         "cannot be done before meter action");
7805                 }
7806         }
7807         /*
7808          * Hairpin flow will add one more TAG action in TX implicit mode.
7809          * In TX explicit mode, there will be no hairpin flow ID.
7810          */
7811         if (hairpin > 0)
7812                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7813         /* Extra metadata enabled: one more TAG action will be added. */
7814         if (dev_conf->dv_flow_en &&
7815             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7816             mlx5_flow_ext_mreg_supported(dev))
7817                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7818         if (rw_act_num >
7819                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7820                 return rte_flow_error_set(error, ENOTSUP,
7821                                           RTE_FLOW_ERROR_TYPE_ACTION,
7822                                           NULL, "too many header modify"
7823                                           " actions to support");
7824         }
7825         /* Eswitch egress mirror and modify flows have a limitation on CX5. */
7826         if (fdb_mirror_limit && modify_after_mirror)
7827                 return rte_flow_error_set(error, EINVAL,
7828                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7829                                 "sample before modify action is not supported");
7830         return 0;
7831 }
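
/*
 * A minimal application-side sketch of exercising the validation above
 * through the public API (illustrative only; port_id 0, the pattern and
 * the action set are assumptions): DROP is accepted together with COUNT,
 * while the checks above reject DROP combined with any other action.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *           { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error error;
 *   int ret = rte_flow_validate(0, &attr, pattern, actions, &error);
 */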
7832
7833 /**
7834  * Internal preparation function. Allocates the DV flow structure;
7835  * its size is constant.
7836  *
7837  * @param[in] dev
7838  *   Pointer to the rte_eth_dev structure.
7839  * @param[in] attr
7840  *   Pointer to the flow attributes.
7841  * @param[in] items
7842  *   Pointer to the list of items.
7843  * @param[in] actions
7844  *   Pointer to the list of actions.
7845  * @param[out] error
7846  *   Pointer to the error structure.
7847  *
7848  * @return
7849  *   Pointer to mlx5_flow object on success,
7850  *   otherwise NULL and rte_errno is set.
7851  */
7852 static struct mlx5_flow *
7853 flow_dv_prepare(struct rte_eth_dev *dev,
7854                 const struct rte_flow_attr *attr __rte_unused,
7855                 const struct rte_flow_item items[] __rte_unused,
7856                 const struct rte_flow_action actions[] __rte_unused,
7857                 struct rte_flow_error *error)
7858 {
7859         uint32_t handle_idx = 0;
7860         struct mlx5_flow *dev_flow;
7861         struct mlx5_flow_handle *dev_handle;
7862         struct mlx5_priv *priv = dev->data->dev_private;
7863         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7864
7865         MLX5_ASSERT(wks);
7866         wks->skip_matcher_reg = 0;
7867         /* Sanity check against memory corruption. */
7868         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7869                 rte_flow_error_set(error, ENOSPC,
7870                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7871                                    "no free temporary device flow");
7872                 return NULL;
7873         }
7874         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7875                                    &handle_idx);
7876         if (!dev_handle) {
7877                 rte_flow_error_set(error, ENOMEM,
7878                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7879                                    "not enough memory to create flow handle");
7880                 return NULL;
7881         }
7882         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7883         dev_flow = &wks->flows[wks->flow_idx++];
7884         memset(dev_flow, 0, sizeof(*dev_flow));
7885         dev_flow->handle = dev_handle;
7886         dev_flow->handle_idx = handle_idx;
7887         /*
7888          * Some old rdma-core releases check the length of the matching
7889          * parameter first, before continuing. That check must use the
7890          * length without the misc4 param. If the flow matches on misc4,
7891          * the length is adjusted accordingly later. Each param member is
7892          * naturally aligned on a 64B boundary.
7893          */
7894         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
7895                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
7896         dev_flow->ingress = attr->ingress;
7897         dev_flow->dv.transfer = attr->transfer;
7898         return dev_flow;
7899 }
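
/*
 * Illustrative sketch of the matcher size bookkeeping above (the
 * "flow_uses_misc4" predicate is hypothetical, not a driver symbol):
 * the initial value size excludes the misc4 parameter and is expected
 * to be restored to the full structure size only when misc4 fields are
 * actually matched on.
 *
 *   dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
 *                             MLX5_ST_SZ_BYTES(fte_match_set_misc4);
 *   ...
 *   if (flow_uses_misc4)
 *           dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
 */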
7900
7901 #ifdef RTE_LIBRTE_MLX5_DEBUG
7902 /**
7903  * Sanity check for match mask and value. Similar to check_valid_spec() in
7904  * the kernel driver. Returns failure if an unmasked bit is set in the value.
7905  *
7906  * @param match_mask
7907  *   pointer to match mask buffer.
7908  * @param match_value
7909  *   pointer to match value buffer.
7910  *
7911  * @return
7912  *   0 if valid, -EINVAL otherwise.
7913  */
7914 static int
7915 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7916 {
7917         uint8_t *m = match_mask;
7918         uint8_t *v = match_value;
7919         unsigned int i;
7920
7921         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7922                 if (v[i] & ~m[i]) {
7923                         DRV_LOG(ERR,
7924                                 "match_value differs from match_criteria"
7925                                 " %p[%u] != %p[%u]",
7926                                 match_value, i, match_mask, i);
7927                         return -EINVAL;
7928                 }
7929         }
7930         return 0;
7931 }
7932 #endif
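
/*
 * A small illustration of the invariant enforced by
 * flow_dv_check_valid_spec() (byte values are hypothetical): every bit
 * set in the value must also be set in the mask.
 *
 *   uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
 *   uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
 *
 *   mask[0] = 0x0f;
 *   value[0] = 0x0a;    (valid:   0x0a & ~0x0f == 0)
 *   value[0] = 0x1a;    (invalid: bit 4 set in value but not in mask,
 *                        so the check returns -EINVAL)
 */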
7933
7934 /**
7935  * Add match of ip_version.
7936  *
7937  * @param[in] group
7938  *   Flow group.
7939  * @param[in] headers_v
7940  *   Values header pointer.
7941  * @param[in] headers_m
7942  *   Masks header pointer.
7943  * @param[in] ip_version
7944  *   The IP version to set.
7945  */
7946 static inline void
7947 flow_dv_set_match_ip_version(uint32_t group,
7948                              void *headers_v,
7949                              void *headers_m,
7950                              uint8_t ip_version)
7951 {
7952         if (group == 0)
7953                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
7954         else
7955                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
7956                          ip_version);
7957         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
7958         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
7959         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
7960 }
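
/*
 * Worked example of the group-0 special case above (values shown are
 * for IPv4): in the root table the mask is widened to the full 4-bit
 * field, while non-root tables match the exact version bits only. In
 * both cases the ethertype match is cleared in favor of ip_version.
 *
 *   flow_dv_set_match_ip_version(0, headers_v, headers_m, 4);
 *       ip_version mask = 0xf, ip_version value = 4
 *   flow_dv_set_match_ip_version(1, headers_v, headers_m, 4);
 *       ip_version mask = 4, ip_version value = 4
 */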
7961
7962 /**
7963  * Add Ethernet item to matcher and to the value.
7964  *
7965  * @param[in, out] matcher
7966  *   Flow matcher.
7967  * @param[in, out] key
7968  *   Flow matcher value.
7969  * @param[in] item
7970  *   Flow pattern to translate.
7971  * @param[in] inner
7972  *   Item is inner pattern.
7973  */
7974 static void
7975 flow_dv_translate_item_eth(void *matcher, void *key,
7976                            const struct rte_flow_item *item, int inner,
7977                            uint32_t group)
7978 {
7979         const struct rte_flow_item_eth *eth_m = item->mask;
7980         const struct rte_flow_item_eth *eth_v = item->spec;
7981         const struct rte_flow_item_eth nic_mask = {
7982                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7983                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
7984                 .type = RTE_BE16(0xffff),
7985                 .has_vlan = 0,
7986         };
7987         void *hdrs_m;
7988         void *hdrs_v;
7989         char *l24_v;
7990         unsigned int i;
7991
7992         if (!eth_v)
7993                 return;
7994         if (!eth_m)
7995                 eth_m = &nic_mask;
7996         if (inner) {
7997                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
7998                                          inner_headers);
7999                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8000         } else {
8001                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8002                                          outer_headers);
8003                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8004         }
8005         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8006                &eth_m->dst, sizeof(eth_m->dst));
8007         /* The value must be in the range of the mask. */
8008         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8009         for (i = 0; i < sizeof(eth_m->dst); ++i)
8010                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8011         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8012                &eth_m->src, sizeof(eth_m->src));
8013         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8014         /* The value must be in the range of the mask. */
8015         for (i = 0; i < sizeof(eth_m->src); ++i)
8016                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8017         /*
8018          * HW supports match on one Ethertype, the Ethertype following the last
8019          * VLAN tag of the packet (see PRM).
8020          * Set match on ethertype only if ETH header is not followed by VLAN.
8021          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8022          * ethertype, and use ip_version field instead.
8023          * eCPRI over Ether layer will use type value 0xAEFE.
8024          */
8025         if (eth_m->type == 0xFFFF) {
8026                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8027                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8028                 switch (eth_v->type) {
8029                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8030                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8031                         return;
8032                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8033                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8034                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8035                         return;
8036                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8037                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8038                         return;
8039                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8040                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8041                         return;
8042                 default:
8043                         break;
8044                 }
8045         }
8046         if (eth_m->has_vlan) {
8047                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8048                 if (eth_v->has_vlan) {
8049                         /*
8050                          * Here, when the has_more_vlan field in the VLAN item
8051                          * is also not set, only single-tagged packets match.
8052                          */
8053                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8054                         return;
8055                 }
8056         }
8057         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8058                  rte_be_to_cpu_16(eth_m->type));
8059         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8060         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8061 }
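
/*
 * A hedged usage sketch for the ethertype fast path above (the item
 * contents are placeholders): a fully-masked type of IPv4 makes the
 * translation set the ip_version field instead of the ethertype.
 *
 *   const struct rte_flow_item_eth eth_spec = {
 *           .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *   };
 *   const struct rte_flow_item_eth eth_mask = {
 *           .type = RTE_BE16(0xffff),
 *   };
 *   const struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ETH,
 *           .spec = &eth_spec,
 *           .mask = &eth_mask,
 *   };
 *   flow_dv_translate_item_eth(matcher, key, &item, 0, group);
 */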
8062
8063 /**
8064  * Add VLAN item to matcher and to the value.
8065  *
8066  * @param[in, out] dev_flow
8067  *   Flow descriptor.
8068  * @param[in, out] matcher
8069  *   Flow matcher.
8070  * @param[in, out] key
8071  *   Flow matcher value.
8072  * @param[in] item
8073  *   Flow pattern to translate.
8074  * @param[in] inner
8075  *   Item is inner pattern.
8076  */
8077 static void
8078 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8079                             void *matcher, void *key,
8080                             const struct rte_flow_item *item,
8081                             int inner, uint32_t group)
8082 {
8083         const struct rte_flow_item_vlan *vlan_m = item->mask;
8084         const struct rte_flow_item_vlan *vlan_v = item->spec;
8085         void *hdrs_m;
8086         void *hdrs_v;
8087         uint16_t tci_m;
8088         uint16_t tci_v;
8089
8090         if (inner) {
8091                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8092                                          inner_headers);
8093                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8094         } else {
8095                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8096                                          outer_headers);
8097                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8098                 /*
8099                  * This is a workaround; masks are not supported
8100                  * and have been pre-validated.
8101                  */
8102                 if (vlan_v)
8103                         dev_flow->handle->vf_vlan.tag =
8104                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8105         }
8106         /*
8107          * When a VLAN item exists in the flow, mark the packet as tagged,
8108          * even if TCI is not specified.
8109          */
8110         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8111                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8112                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8113         }
8114         if (!vlan_v)
8115                 return;
8116         if (!vlan_m)
8117                 vlan_m = &rte_flow_item_vlan_mask;
8118         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8119         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8120         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8121         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8122         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8123         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8124         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8125         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8126         /*
8127          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8128          * ethertype, and use ip_version field instead.
8129          */
8130         if (vlan_m->inner_type == 0xFFFF) {
8131                 switch (vlan_v->inner_type) {
8132                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8133                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8134                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8135                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8136                         return;
8137                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8138                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8139                         return;
8140                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8141                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8142                         return;
8143                 default:
8144                         break;
8145                 }
8146         }
8147         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8148                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8149                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8150                 /* Only one vlan_tag bit can be set. */
8151                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8152                 return;
8153         }
8154         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8155                  rte_be_to_cpu_16(vlan_m->inner_type));
8156         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8157                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8158 }
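
/*
 * Worked example of the TCI decomposition above (the TCI value is a
 * placeholder; MLX5_SET truncates each write to the field width): for
 * tci = 0xe005 with a full 0xffff mask,
 *
 *   first_vid  = tci & 0x0fff        = 0x005
 *   first_cfi  = (tci >> 12) & 0x1   = 0
 *   first_prio = (tci >> 13) & 0x7   = 7
 */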
8159
8160 /**
8161  * Add IPV4 item to matcher and to the value.
8162  *
8163  * @param[in, out] matcher
8164  *   Flow matcher.
8165  * @param[in, out] key
8166  *   Flow matcher value.
8167  * @param[in] item
8168  *   Flow pattern to translate.
8169  * @param[in] inner
8170  *   Item is inner pattern.
8171  * @param[in] group
8172  *   The group to insert the rule.
8173  */
8174 static void
8175 flow_dv_translate_item_ipv4(void *matcher, void *key,
8176                             const struct rte_flow_item *item,
8177                             int inner, uint32_t group)
8178 {
8179         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8180         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8181         const struct rte_flow_item_ipv4 nic_mask = {
8182                 .hdr = {
8183                         .src_addr = RTE_BE32(0xffffffff),
8184                         .dst_addr = RTE_BE32(0xffffffff),
8185                         .type_of_service = 0xff,
8186                         .next_proto_id = 0xff,
8187                         .time_to_live = 0xff,
8188                 },
8189         };
8190         void *headers_m;
8191         void *headers_v;
8192         char *l24_m;
8193         char *l24_v;
8194         uint8_t tos;
8195
8196         if (inner) {
8197                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8198                                          inner_headers);
8199                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8200         } else {
8201                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8202                                          outer_headers);
8203                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8204         }
8205         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8206         if (!ipv4_v)
8207                 return;
8208         if (!ipv4_m)
8209                 ipv4_m = &nic_mask;
8210         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8211                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8212         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8213                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8214         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8215         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8216         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8217                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8218         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8219                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8220         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8221         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8222         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8223         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8224                  ipv4_m->hdr.type_of_service);
8225         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8226         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8227                  ipv4_m->hdr.type_of_service >> 2);
8228         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
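             /*
              * The TOS byte keeps DSCP in bits 7..2 and ECN in bits 1..0,
              * e.g. tos == 0xB8 yields ip_dscp 0x2E (EF) and ip_ecn 0, hence
              * the ">> 2" above; MLX5_SET() masks off the upper bits for the
              * 2-bit ip_ecn field.
              */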
8229         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8230                  ipv4_m->hdr.next_proto_id);
8231         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8232                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8233         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8234                  ipv4_m->hdr.time_to_live);
8235         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8236                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8237         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8238                  !!(ipv4_m->hdr.fragment_offset));
8239         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8240                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8241 }
8242
8243 /**
8244  * Add IPV6 item to matcher and to the value.
8245  *
8246  * @param[in, out] matcher
8247  *   Flow matcher.
8248  * @param[in, out] key
8249  *   Flow matcher value.
8250  * @param[in] item
8251  *   Flow pattern to translate.
8252  * @param[in] inner
8253  *   Item is inner pattern.
8254  * @param[in] group
8255  *   The group to insert the rule.
8256  */
8257 static void
8258 flow_dv_translate_item_ipv6(void *matcher, void *key,
8259                             const struct rte_flow_item *item,
8260                             int inner, uint32_t group)
8261 {
8262         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8263         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8264         const struct rte_flow_item_ipv6 nic_mask = {
8265                 .hdr = {
8266                         .src_addr =
8267                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8268                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8269                         .dst_addr =
8270                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8271                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8272                         .vtc_flow = RTE_BE32(0xffffffff),
8273                         .proto = 0xff,
8274                         .hop_limits = 0xff,
8275                 },
8276         };
8277         void *headers_m;
8278         void *headers_v;
8279         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8280         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8281         char *l24_m;
8282         char *l24_v;
8283         uint32_t vtc_m;
8284         uint32_t vtc_v;
8285         int i;
8286         int size;
8287
8288         if (inner) {
8289                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8290                                          inner_headers);
8291                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8292         } else {
8293                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8294                                          outer_headers);
8295                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8296         }
8297         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8298         if (!ipv6_v)
8299                 return;
8300         if (!ipv6_m)
8301                 ipv6_m = &nic_mask;
8302         size = sizeof(ipv6_m->hdr.dst_addr);
8303         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8304                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8305         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8306                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8307         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8308         for (i = 0; i < size; ++i)
8309                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8310         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8311                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8312         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8313                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8314         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8315         for (i = 0; i < size; ++i)
8316                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8317         /* TOS. */
8318         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8319         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8320         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8321         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8322         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8323         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
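             /*
              * vtc_flow layout: Version(4) | Traffic Class(8) | Flow Label(20),
              * so the Traffic Class starts at bit 20. E.g. vtc_v == 0x6B812345
              * (TC 0xB8) gives ip_dscp 0x2E and ip_ecn 0 after MLX5_SET()
              * truncation to the 6-bit and 2-bit field widths.
              */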
8324         /* Label. */
8325         if (inner) {
8326                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8327                          vtc_m);
8328                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8329                          vtc_v);
8330         } else {
8331                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8332                          vtc_m);
8333                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8334                          vtc_v);
8335         }
8336         /* Protocol. */
8337         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8338                  ipv6_m->hdr.proto);
8339         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8340                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8341         /* Hop limit. */
8342         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8343                  ipv6_m->hdr.hop_limits);
8344         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8345                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8346         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8347                  !!(ipv6_m->has_frag_ext));
8348         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8349                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8350 }
8351
8352 /**
8353  * Add IPV6 fragment extension item to matcher and to the value.
8354  *
8355  * @param[in, out] matcher
8356  *   Flow matcher.
8357  * @param[in, out] key
8358  *   Flow matcher value.
8359  * @param[in] item
8360  *   Flow pattern to translate.
8361  * @param[in] inner
8362  *   Item is inner pattern.
8363  */
8364 static void
8365 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8366                                      const struct rte_flow_item *item,
8367                                      int inner)
8368 {
8369         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8370         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8371         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8372                 .hdr = {
8373                         .next_header = 0xff,
8374                         .frag_data = RTE_BE16(0xffff),
8375                 },
8376         };
8377         void *headers_m;
8378         void *headers_v;
8379
8380         if (inner) {
8381                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8382                                          inner_headers);
8383                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8384         } else {
8385                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8386                                          outer_headers);
8387                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8388         }
8389         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8390         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8391         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8392         if (!ipv6_frag_ext_v)
8393                 return;
8394         if (!ipv6_frag_ext_m)
8395                 ipv6_frag_ext_m = &nic_mask;
8396         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8397                  ipv6_frag_ext_m->hdr.next_header);
8398         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8399                  ipv6_frag_ext_v->hdr.next_header &
8400                  ipv6_frag_ext_m->hdr.next_header);
8401 }
8402
8403 /**
8404  * Add TCP item to matcher and to the value.
8405  *
8406  * @param[in, out] matcher
8407  *   Flow matcher.
8408  * @param[in, out] key
8409  *   Flow matcher value.
8410  * @param[in] item
8411  *   Flow pattern to translate.
8412  * @param[in] inner
8413  *   Item is inner pattern.
8414  */
8415 static void
8416 flow_dv_translate_item_tcp(void *matcher, void *key,
8417                            const struct rte_flow_item *item,
8418                            int inner)
8419 {
8420         const struct rte_flow_item_tcp *tcp_m = item->mask;
8421         const struct rte_flow_item_tcp *tcp_v = item->spec;
8422         void *headers_m;
8423         void *headers_v;
8424
8425         if (inner) {
8426                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8427                                          inner_headers);
8428                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8429         } else {
8430                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8431                                          outer_headers);
8432                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8433         }
8434         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8435         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8436         if (!tcp_v)
8437                 return;
8438         if (!tcp_m)
8439                 tcp_m = &rte_flow_item_tcp_mask;
8440         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8441                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8442         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8443                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8444         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8445                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8446         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8447                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8448         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8449                  tcp_m->hdr.tcp_flags);
8450         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8451                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8452 }
8453
8454 /**
8455  * Add UDP item to matcher and to the value.
8456  *
8457  * @param[in, out] matcher
8458  *   Flow matcher.
8459  * @param[in, out] key
8460  *   Flow matcher value.
8461  * @param[in] item
8462  *   Flow pattern to translate.
8463  * @param[in] inner
8464  *   Item is inner pattern.
8465  */
8466 static void
8467 flow_dv_translate_item_udp(void *matcher, void *key,
8468                            const struct rte_flow_item *item,
8469                            int inner)
8470 {
8471         const struct rte_flow_item_udp *udp_m = item->mask;
8472         const struct rte_flow_item_udp *udp_v = item->spec;
8473         void *headers_m;
8474         void *headers_v;
8475
8476         if (inner) {
8477                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8478                                          inner_headers);
8479                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8480         } else {
8481                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8482                                          outer_headers);
8483                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8484         }
8485         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8486         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8487         if (!udp_v)
8488                 return;
8489         if (!udp_m)
8490                 udp_m = &rte_flow_item_udp_mask;
8491         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8492                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8493         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8494                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8495         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8496                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8497         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8498                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8499 }
8500
8501 /**
8502  * Add GRE optional Key item to matcher and to the value.
8503  *
8504  * @param[in, out] matcher
8505  *   Flow matcher.
8506  * @param[in, out] key
8507  *   Flow matcher value.
8508  * @param[in] item
8509  *   Flow pattern to translate.
8512  */
8513 static void
8514 flow_dv_translate_item_gre_key(void *matcher, void *key,
8515                                    const struct rte_flow_item *item)
8516 {
8517         const rte_be32_t *key_m = item->mask;
8518         const rte_be32_t *key_v = item->spec;
8519         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8520         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8521         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8522
8523         /* GRE K bit must be on and should already be validated. */
8524         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8525         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8526         if (!key_v)
8527                 return;
8528         if (!key_m)
8529                 key_m = &gre_key_default_mask;
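             /*
              * The 32-bit GRE key is split by HW into gre_key_h (high 24 bits)
              * and gre_key_l (low 8 bits), e.g. key 0x12345678 is matched as
              * gre_key_h == 0x123456 and gre_key_l == 0x78.
              */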
8530         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8531                  rte_be_to_cpu_32(*key_m) >> 8);
8532         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8533                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8534         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8535                  rte_be_to_cpu_32(*key_m) & 0xFF);
8536         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8537                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8538 }
8539
8540 /**
8541  * Add GRE item to matcher and to the value.
8542  *
8543  * @param[in, out] matcher
8544  *   Flow matcher.
8545  * @param[in, out] key
8546  *   Flow matcher value.
8547  * @param[in] item
8548  *   Flow pattern to translate.
8549  * @param[in] inner
8550  *   Item is inner pattern.
8551  */
8552 static void
8553 flow_dv_translate_item_gre(void *matcher, void *key,
8554                            const struct rte_flow_item *item,
8555                            int inner)
8556 {
8557         const struct rte_flow_item_gre *gre_m = item->mask;
8558         const struct rte_flow_item_gre *gre_v = item->spec;
8559         void *headers_m;
8560         void *headers_v;
8561         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8562         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8563         struct {
8564                 union {
8565                         __extension__
8566                         struct {
8567                                 uint16_t version:3;
8568                                 uint16_t rsvd0:9;
8569                                 uint16_t s_present:1;
8570                                 uint16_t k_present:1;
8571                                 uint16_t rsvd_bit1:1;
8572                                 uint16_t c_present:1;
8573                         };
8574                         uint16_t value;
8575                 };
8576         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
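             /*
              * The bitfield above assumes little-endian bit allocation, so
              * after rte_be_to_cpu_16() e.g. a host-order value 0x2000 has
              * only k_present set (bit 13), while 0xB000 sets c_present,
              * k_present and s_present.
              */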
8577
8578         if (inner) {
8579                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8580                                          inner_headers);
8581                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8582         } else {
8583                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8584                                          outer_headers);
8585                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8586         }
8587         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8588         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8589         if (!gre_v)
8590                 return;
8591         if (!gre_m)
8592                 gre_m = &rte_flow_item_gre_mask;
8593         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8594                  rte_be_to_cpu_16(gre_m->protocol));
8595         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8596                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8597         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8598         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8599         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8600                  gre_crks_rsvd0_ver_m.c_present);
8601         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8602                  gre_crks_rsvd0_ver_v.c_present &
8603                  gre_crks_rsvd0_ver_m.c_present);
8604         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8605                  gre_crks_rsvd0_ver_m.k_present);
8606         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8607                  gre_crks_rsvd0_ver_v.k_present &
8608                  gre_crks_rsvd0_ver_m.k_present);
8609         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8610                  gre_crks_rsvd0_ver_m.s_present);
8611         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8612                  gre_crks_rsvd0_ver_v.s_present &
8613                  gre_crks_rsvd0_ver_m.s_present);
8614 }
8615
8616 /**
8617  * Add NVGRE item to matcher and to the value.
8618  *
8619  * @param[in, out] matcher
8620  *   Flow matcher.
8621  * @param[in, out] key
8622  *   Flow matcher value.
8623  * @param[in] item
8624  *   Flow pattern to translate.
8625  * @param[in] inner
8626  *   Item is inner pattern.
8627  */
8628 static void
8629 flow_dv_translate_item_nvgre(void *matcher, void *key,
8630                              const struct rte_flow_item *item,
8631                              int inner)
8632 {
8633         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8634         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8635         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8636         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8637         const char *tni_flow_id_m;
8638         const char *tni_flow_id_v;
8639         char *gre_key_m;
8640         char *gre_key_v;
8641         int size;
8642         int i;
8643
8644         /* For NVGRE, GRE header fields must be set with defined values. */
8645         const struct rte_flow_item_gre gre_spec = {
8646                 .c_rsvd0_ver = RTE_BE16(0x2000),
8647                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8648         };
8649         const struct rte_flow_item_gre gre_mask = {
8650                 .c_rsvd0_ver = RTE_BE16(0xB000),
8651                 .protocol = RTE_BE16(UINT16_MAX),
8652         };
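             /*
              * The spec/mask pair above (0x2000 under mask 0xB000) pins K=1
              * with C and S clear, i.e. a GRE header carrying only a key,
              * which together with the TEB protocol defines NVGRE.
              */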
8653         const struct rte_flow_item gre_item = {
8654                 .spec = &gre_spec,
8655                 .mask = &gre_mask,
8656                 .last = NULL,
8657         };
8658         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8659         if (!nvgre_v)
8660                 return;
8661         if (!nvgre_m)
8662                 nvgre_m = &rte_flow_item_nvgre_mask;
8663         tni_flow_id_m = (const char *)nvgre_m->tni;
8664         tni_flow_id_v = (const char *)nvgre_v->tni;
8665         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8666         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8667         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8668         memcpy(gre_key_m, tni_flow_id_m, size);
8669         for (i = 0; i < size; ++i)
8670                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8671 }
8672
8673 /**
8674  * Add VXLAN item to matcher and to the value.
8675  *
8676  * @param[in, out] matcher
8677  *   Flow matcher.
8678  * @param[in, out] key
8679  *   Flow matcher value.
8680  * @param[in] item
8681  *   Flow pattern to translate.
8682  * @param[in] inner
8683  *   Item is inner pattern.
8684  */
8685 static void
8686 flow_dv_translate_item_vxlan(void *matcher, void *key,
8687                              const struct rte_flow_item *item,
8688                              int inner)
8689 {
8690         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8691         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8692         void *headers_m;
8693         void *headers_v;
8694         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8695         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8696         char *vni_m;
8697         char *vni_v;
8698         uint16_t dport;
8699         int size;
8700         int i;
8701
8702         if (inner) {
8703                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8704                                          inner_headers);
8705                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8706         } else {
8707                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8708                                          outer_headers);
8709                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8710         }
8711         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8712                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8713         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8714                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8715                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8716         }
8717         if (!vxlan_v)
8718                 return;
8719         if (!vxlan_m)
8720                 vxlan_m = &rte_flow_item_vxlan_mask;
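             /*
              * The VNI is a 24-bit value carried as 3 bytes in network order,
              * hence the byte-wise AND of spec and mask below.
              */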
8721         size = sizeof(vxlan_m->vni);
8722         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8723         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8724         memcpy(vni_m, vxlan_m->vni, size);
8725         for (i = 0; i < size; ++i)
8726                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8727 }
8728
8729 /**
8730  * Add VXLAN-GPE item to matcher and to the value.
8731  *
8732  * @param[in, out] matcher
8733  *   Flow matcher.
8734  * @param[in, out] key
8735  *   Flow matcher value.
8736  * @param[in] item
8737  *   Flow pattern to translate.
8738  * @param[in] inner
8739  *   Item is inner pattern.
8740  */
8742 static void
8743 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8744                                  const struct rte_flow_item *item, int inner)
8745 {
8746         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8747         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8748         void *headers_m;
8749         void *headers_v;
8750         void *misc_m =
8751                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8752         void *misc_v =
8753                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8754         char *vni_m;
8755         char *vni_v;
8756         uint16_t dport;
8757         int size;
8758         int i;
8759         uint8_t flags_m = 0xff;
8760         uint8_t flags_v = 0xc;
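             /*
              * Default flags 0xc presumably correspond to the I and P bits
              * (VNI valid, next protocol present); they are used only when
              * the item does not provide a flags mask.
              */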
8761
8762         if (inner) {
8763                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8764                                          inner_headers);
8765                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8766         } else {
8767                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8768                                          outer_headers);
8769                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8770         }
8771         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8772                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
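             /*
              * This function is only reached for VXLAN-GPE items, so the
              * ternary above always resolves to MLX5_UDP_PORT_VXLAN_GPE.
              */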
8773         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8774                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8775                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8776         }
8777         if (!vxlan_v)
8778                 return;
8779         if (!vxlan_m)
8780                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8781         size = sizeof(vxlan_m->vni);
8782         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8783         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8784         memcpy(vni_m, vxlan_m->vni, size);
8785         for (i = 0; i < size; ++i)
8786                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8787         if (vxlan_m->flags) {
8788                 flags_m = vxlan_m->flags;
8789                 flags_v = vxlan_v->flags;
8790         }
8791         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8792         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8793         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8794                  vxlan_m->protocol);
8795         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8796                  vxlan_v->protocol);
8797 }
8798
8799 /**
8800  * Add Geneve item to matcher and to the value.
8801  *
8802  * @param[in, out] matcher
8803  *   Flow matcher.
8804  * @param[in, out] key
8805  *   Flow matcher value.
8806  * @param[in] item
8807  *   Flow pattern to translate.
8808  * @param[in] inner
8809  *   Item is inner pattern.
8810  */
8812 static void
8813 flow_dv_translate_item_geneve(void *matcher, void *key,
8814                               const struct rte_flow_item *item, int inner)
8815 {
8816         const struct rte_flow_item_geneve *geneve_m = item->mask;
8817         const struct rte_flow_item_geneve *geneve_v = item->spec;
8818         void *headers_m;
8819         void *headers_v;
8820         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8821         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8822         uint16_t dport;
8823         uint16_t gbhdr_m;
8824         uint16_t gbhdr_v;
8825         char *vni_m;
8826         char *vni_v;
8827         size_t size, i;
8828
8829         if (inner) {
8830                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8831                                          inner_headers);
8832                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8833         } else {
8834                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8835                                          outer_headers);
8836                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8837         }
8838         dport = MLX5_UDP_PORT_GENEVE;
8839         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8840                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8841                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8842         }
8843         if (!geneve_v)
8844                 return;
8845         if (!geneve_m)
8846                 geneve_m = &rte_flow_item_geneve_mask;
8847         size = sizeof(geneve_m->vni);
8848         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8849         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8850         memcpy(vni_m, geneve_m->vni, size);
8851         for (i = 0; i < size; ++i)
8852                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8853         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8854                  rte_be_to_cpu_16(geneve_m->protocol));
8855         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8856                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8857         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8858         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
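             /*
              * ver_opt_len_o_c_rsvd0 layout: Ver(2) | Opt Len(6) | O(1) |
              * C(1) | Rsvd(6); the MLX5_GENEVE_*_VAL() helpers below extract
              * the OAM flag and the option length from the host-order value.
              */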
8859         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8860                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8861         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8862                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8863         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8864                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8865         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8866                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8867                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8868 }
8869
8870 /**
8871  * Create Geneve TLV option resource.
8872  *
8873  * @param[in, out] dev
8874  *   Pointer to rte_eth_dev structure.
8875  * @param[in] item
8876  *   Flow pattern to translate.
8879  * @param[out] error
8880  *   Pointer to error structure.
8881  *
8882  * @return
8883  *   0 on success, otherwise -errno and errno is set.
8884  */
8886 int
8887 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8888                                              const struct rte_flow_item *item,
8889                                              struct rte_flow_error *error)
8890 {
8891         struct mlx5_priv *priv = dev->data->dev_private;
8892         struct mlx5_dev_ctx_shared *sh = priv->sh;
8893         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
8894                         sh->geneve_tlv_option_resource;
8895         struct mlx5_devx_obj *obj;
8896         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8897         int ret = 0;
8898
8899         if (!geneve_opt_v)
8900                 return -1;
8901         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
8902         if (geneve_opt_resource != NULL) {
8903                 if (geneve_opt_resource->option_class ==
8904                         geneve_opt_v->option_class &&
8905                         geneve_opt_resource->option_type ==
8906                         geneve_opt_v->option_type &&
8907                         geneve_opt_resource->length ==
8908                         geneve_opt_v->option_len) {
8909                         /* We already have GENEVE TLV option obj allocated. */
8910                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
8911                                            __ATOMIC_RELAXED);
8912                 } else {
8913                         ret = rte_flow_error_set(error, ENOMEM,
8914                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8915                                 "Only one GENEVE TLV option supported");
8916                         goto exit;
8917                 }
8918         } else {
8919                 /* Create a GENEVE TLV object and resource. */
8920                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
8921                                 geneve_opt_v->option_class,
8922                                 geneve_opt_v->option_type,
8923                                 geneve_opt_v->option_len);
8924                 if (!obj) {
8925                         ret = rte_flow_error_set(error, ENODATA,
8926                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8927                                 "Failed to create GENEVE TLV Devx object");
8928                         goto exit;
8929                 }
8930                 sh->geneve_tlv_option_resource =
8931                                 mlx5_malloc(MLX5_MEM_ZERO,
8932                                                 sizeof(*geneve_opt_resource),
8933                                                 0, SOCKET_ID_ANY);
8934                 if (!sh->geneve_tlv_option_resource) {
8935                         claim_zero(mlx5_devx_cmd_destroy(obj));
8936                         ret = rte_flow_error_set(error, ENOMEM,
8937                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8938                                 "GENEVE TLV object memory allocation failed");
8939                         goto exit;
8940                 }
8941                 geneve_opt_resource = sh->geneve_tlv_option_resource;
8942                 geneve_opt_resource->obj = obj;
8943                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
8944                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
8945                 geneve_opt_resource->length = geneve_opt_v->option_len;
8946                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
8947                                 __ATOMIC_RELAXED);
8948         }
8949 exit:
8950         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
8951         return ret;
8952 }
8953
8954 /**
8955  * Add Geneve TLV option item to matcher.
8956  *
8957  * @param[in, out] dev
8958  *   Pointer to rte_eth_dev structure.
8959  * @param[in, out] matcher
8960  *   Flow matcher.
8961  * @param[in, out] key
8962  *   Flow matcher value.
8963  * @param[in] item
8964  *   Flow pattern to translate.
8965  * @param[out] error
8966  *   Pointer to error structure.
      *
      * @return
      *   0 on success, a negative errno value otherwise.
8967  */
8968 static int
8969 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
8970                                   void *key, const struct rte_flow_item *item,
8971                                   struct rte_flow_error *error)
8972 {
8973         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
8974         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
8975         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8976         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8977         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8978                         misc_parameters_3);
8979         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8980         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
8981         int ret = 0;
8982
8983         if (!geneve_opt_v)
8984                 return -1;
8985         if (!geneve_opt_m)
8986                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
8987         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
8988                                                            error);
8989         if (ret) {
8990                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
8991                 return ret;
8992         }
8993         /*
8994          * Set the option length in the GENEVE header if not requested.
8995          * The GENEVE TLV option length is expressed by the option length field
8996          * in the GENEVE header.
8997          * If the option length was not requested but the GENEVE TLV option item
8998          * is present, set the option length field implicitly.
8999          */
9000         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9001                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9002                          MLX5_GENEVE_OPTLEN_MASK);
9003                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9004                          geneve_opt_v->option_len + 1);
9005         }
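             /*
              * The "+ 1" above accounts for the 4-byte option header: the
              * GENEVE opt_len field counts the whole options section in
              * 4-byte words, while option_len covers only the option data,
              * e.g. option_len 1 (4 data bytes) is matched as opt_len 2.
              */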
9006         /* Set the data; only the first 32 bits of option data are matched. */
9007         if (geneve_opt_v->data) {
9008                 memcpy(&opt_data_key, geneve_opt_v->data,
9009                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9010                                 sizeof(opt_data_key)));
9011                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9012                                 sizeof(opt_data_key));
9013                 memcpy(&opt_data_mask, geneve_opt_m->data,
9014                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9015                                 sizeof(opt_data_mask)));
9016                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9017                                 sizeof(opt_data_mask));
9018                 MLX5_SET(fte_match_set_misc3, misc3_m,
9019                                 geneve_tlv_option_0_data,
9020                                 rte_be_to_cpu_32(opt_data_mask));
9021                 MLX5_SET(fte_match_set_misc3, misc3_v,
9022                                 geneve_tlv_option_0_data,
9023                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9024         }
9025         return ret;
9026 }
9027
9028 /**
9029  * Add MPLS item to matcher and to the value.
9030  *
9031  * @param[in, out] matcher
9032  *   Flow matcher.
9033  * @param[in, out] key
9034  *   Flow matcher value.
9035  * @param[in] item
9036  *   Flow pattern to translate.
9037  * @param[in] prev_layer
9038  *   The protocol layer indicated in previous item.
9039  * @param[in] inner
9040  *   Item is inner pattern.
9041  */
9042 static void
9043 flow_dv_translate_item_mpls(void *matcher, void *key,
9044                             const struct rte_flow_item *item,
9045                             uint64_t prev_layer,
9046                             int inner)
9047 {
9048         const uint32_t *in_mpls_m = item->mask;
9049         const uint32_t *in_mpls_v = item->spec;
9050         uint32_t *out_mpls_m = NULL;
9051         uint32_t *out_mpls_v = NULL;
9052         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9053         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9054         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9055                                      misc_parameters_2);
9056         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9057         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9058         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9059
9060         switch (prev_layer) {
9061         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9062                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9063                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9064                          MLX5_UDP_PORT_MPLS);
9065                 break;
9066         case MLX5_FLOW_LAYER_GRE:
9067                 /* Fall-through. */
9068         case MLX5_FLOW_LAYER_GRE_KEY:
9069                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9070                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9071                          RTE_ETHER_TYPE_MPLS);
9072                 break;
9073         default:
9074                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
9075                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
9076                          IPPROTO_MPLS);
9077                 break;
9078         }
9079         if (!in_mpls_v)
9080                 return;
9081         if (!in_mpls_m)
9082                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
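             /*
              * An MPLS label stack entry is one 32-bit word:
              * Label(20) | TC(3) | S(1) | TTL(8); the whole big-endian word
              * is copied below into the selected outer_first_mpls* field.
              */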
9083         switch (prev_layer) {
9084         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9085                 out_mpls_m =
9086                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9087                                                  outer_first_mpls_over_udp);
9088                 out_mpls_v =
9089                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9090                                                  outer_first_mpls_over_udp);
9091                 break;
9092         case MLX5_FLOW_LAYER_GRE:
9093                 out_mpls_m =
9094                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9095                                                  outer_first_mpls_over_gre);
9096                 out_mpls_v =
9097                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9098                                                  outer_first_mpls_over_gre);
9099                 break;
9100         default:
9101                 /* Inner MPLS not over GRE or UDP is not supported. */
9102                 if (!inner) {
9103                         out_mpls_m =
9104                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9105                                                          misc2_m,
9106                                                          outer_first_mpls);
9107                         out_mpls_v =
9108                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9109                                                          misc2_v,
9110                                                          outer_first_mpls);
9111                 }
9112                 break;
9113         }
9114         if (out_mpls_m && out_mpls_v) {
9115                 *out_mpls_m = *in_mpls_m;
9116                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9117         }
9118 }
9119
9120 /**
9121  * Add metadata register item to matcher
9122  *
9123  * @param[in, out] matcher
9124  *   Flow matcher.
9125  * @param[in, out] key
9126  *   Flow matcher value.
9127  * @param[in] reg_type
9128  *   Type of device metadata register.
9129  * @param[in] data
9130  *   Register data to match.
9131  * @param[in] mask
9132  *   Register mask.
9133  */
9134 static void
9135 flow_dv_match_meta_reg(void *matcher, void *key,
9136                        enum modify_reg reg_type,
9137                        uint32_t data, uint32_t mask)
9138 {
9139         void *misc2_m =
9140                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9141         void *misc2_v =
9142                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9143         uint32_t temp;
9144
9145         data &= mask;
9146         switch (reg_type) {
9147         case REG_A:
9148                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9149                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9150                 break;
9151         case REG_B:
9152                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9153                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9154                 break;
9155         case REG_C_0:
9156                 /*
9157                  * The metadata register C0 field might be divided into
9158                  * source vport index and META item value, we should set
9159                  * this field according to specified mask, not as whole one.
9160                  */
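                     /*
                      * E.g. an existing vport match may own mask 0x0000ffff
                      * while a META item owns 0xffff0000; the read-modify-write
                      * below ORs the masks and merges the data so neither half
                      * is clobbered.
                      */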
9161                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9162                 temp |= mask;
9163                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9164                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9165                 temp &= ~mask;
9166                 temp |= data;
9167                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9168                 break;
9169         case REG_C_1:
9170                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9171                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9172                 break;
9173         case REG_C_2:
9174                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9175                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9176                 break;
9177         case REG_C_3:
9178                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9179                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9180                 break;
9181         case REG_C_4:
9182                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9183                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9184                 break;
9185         case REG_C_5:
9186                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9187                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9188                 break;
9189         case REG_C_6:
9190                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9191                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9192                 break;
9193         case REG_C_7:
9194                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9195                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9196                 break;
9197         default:
9198                 MLX5_ASSERT(false);
9199                 break;
9200         }
9201 }
9202
9203 /**
9204  * Add MARK item to matcher
9205  *
9206  * @param[in] dev
9207  *   The device to configure through.
9208  * @param[in, out] matcher
9209  *   Flow matcher.
9210  * @param[in, out] key
9211  *   Flow matcher value.
9212  * @param[in] item
9213  *   Flow pattern to translate.
9214  */
9215 static void
9216 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9217                             void *matcher, void *key,
9218                             const struct rte_flow_item *item)
9219 {
9220         struct mlx5_priv *priv = dev->data->dev_private;
9221         const struct rte_flow_item_mark *mark;
9222         uint32_t value;
9223         uint32_t mask;
9224
9225         mark = item->mask ? (const void *)item->mask :
9226                             &rte_flow_item_mark_mask;
9227         mask = mark->id & priv->sh->dv_mark_mask;
9228         mark = (const void *)item->spec;
9229         MLX5_ASSERT(mark);
9230         value = mark->id & priv->sh->dv_mark_mask & mask;
9231         if (mask) {
9232                 enum modify_reg reg;
9233
9234                 /* Get the metadata register index for the mark. */
9235                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9236                 MLX5_ASSERT(reg > 0);
9237                 if (reg == REG_C_0) {
9238                         struct mlx5_priv *priv = dev->data->dev_private;
9239                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9240                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9241
9242                         mask &= msk_c0;
9243                         mask <<= shl_c0;
9244                         value <<= shl_c0;
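                             /*
                              * Only part of REG_C_0 may be available for the
                              * MARK value (the rest can hold the vport meta
                              * tag), so the value/mask pair is confined to
                              * dv_regc0_mask and shifted to its bit offset,
                              * e.g. rte_bsf32(0xffff0000) == 16.
                              */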
9245                 }
9246                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9247         }
9248 }
9249
9250 /**
9251  * Add META item to matcher
9252  *
9253  * @param[in] dev
9254  *   The device to configure through.
9255  * @param[in, out] matcher
9256  *   Flow matcher.
9257  * @param[in, out] key
9258  *   Flow matcher value.
9259  * @param[in] attr
9260  *   Attributes of flow that includes this item.
9261  * @param[in] item
9262  *   Flow pattern to translate.
9263  */
9264 static void
9265 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9266                             void *matcher, void *key,
9267                             const struct rte_flow_attr *attr,
9268                             const struct rte_flow_item *item)
9269 {
9270         const struct rte_flow_item_meta *meta_m;
9271         const struct rte_flow_item_meta *meta_v;
9272
9273         meta_m = (const void *)item->mask;
9274         if (!meta_m)
9275                 meta_m = &rte_flow_item_meta_mask;
9276         meta_v = (const void *)item->spec;
9277         if (meta_v) {
9278                 int reg;
9279                 uint32_t value = meta_v->data;
9280                 uint32_t mask = meta_m->data;
9281
9282                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9283                 if (reg < 0)
9284                         return;
9285                 MLX5_ASSERT(reg != REG_NON);
9286                 if (reg == REG_C_0) {
9287                         struct mlx5_priv *priv = dev->data->dev_private;
9288                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9289                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9290
9291                         mask &= msk_c0;
9292                         mask <<= shl_c0;
9293                         value <<= shl_c0;
9294                 }
9295                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9296         }
9297 }
9298
9299 /**
9300  * Add vport metadata Reg C0 item to matcher
9301  *
9302  * @param[in, out] matcher
9303  *   Flow matcher.
9304  * @param[in, out] key
9305  *   Flow matcher value.
9306  * @param[in] value
9307  *   Register value to match.
      * @param[in] mask
      *   Register mask.
9308  */
9309 static void
9310 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9311                                   uint32_t value, uint32_t mask)
9312 {
9313         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9314 }
9315
9316 /**
9317  * Add tag item to matcher
9318  *
9319  * @param[in] dev
9320  *   The device to configure through.
9321  * @param[in, out] matcher
9322  *   Flow matcher.
9323  * @param[in, out] key
9324  *   Flow matcher value.
9325  * @param[in] item
9326  *   Flow pattern to translate.
9327  */
9328 static void
9329 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9330                                 void *matcher, void *key,
9331                                 const struct rte_flow_item *item)
9332 {
9333         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9334         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9335         uint32_t mask, value;
9336
9337         MLX5_ASSERT(tag_v);
9338         value = tag_v->data;
9339         mask = tag_m ? tag_m->data : UINT32_MAX;
9340         if (tag_v->id == REG_C_0) {
9341                 struct mlx5_priv *priv = dev->data->dev_private;
9342                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9343                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9344
9345                 mask &= msk_c0;
9346                 mask <<= shl_c0;
9347                 value <<= shl_c0;
9348         }
9349         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9350 }
9351
9352 /**
9353  * Add TAG item to matcher
9354  *
9355  * @param[in] dev
9356  *   The device to configure through.
9357  * @param[in, out] matcher
9358  *   Flow matcher.
9359  * @param[in, out] key
9360  *   Flow matcher value.
9361  * @param[in] item
9362  *   Flow pattern to translate.
9363  */
9364 static void
9365 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9366                            void *matcher, void *key,
9367                            const struct rte_flow_item *item)
9368 {
9369         const struct rte_flow_item_tag *tag_v = item->spec;
9370         const struct rte_flow_item_tag *tag_m = item->mask;
9371         enum modify_reg reg;
9372
9373         MLX5_ASSERT(tag_v);
9374         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9375         /* Get the metadata register index for the tag. */
9376         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9377         MLX5_ASSERT(reg > 0);
9378         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9379 }
9380
9381 /**
9382  * Add source vport match to the specified matcher.
9383  *
9384  * @param[in, out] matcher
9385  *   Flow matcher.
9386  * @param[in, out] key
9387  *   Flow matcher value.
9388  * @param[in] port
9389  *   Source vport value to match.
9390  * @param[in] mask
9391  *   Mask to apply.
9392  */
9393 static void
9394 flow_dv_translate_item_source_vport(void *matcher, void *key,
9395                                     int16_t port, uint16_t mask)
9396 {
9397         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9398         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9399
9400         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9401         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9402 }
9403
9404 /**
9405  * Translate port-id item to E-Switch match on port-id.
9406  *
9407  * @param[in] dev
9408  *   The device to configure through.
9409  * @param[in, out] matcher
9410  *   Flow matcher.
9411  * @param[in, out] key
9412  *   Flow matcher value.
9413  * @param[in] item
9414  *   Flow pattern to translate.
9415  * @param[in] attr
9416  *   Flow attributes.
9417  *
9418  * @return
9419  *   0 on success, a negative errno value otherwise.
9420  */
9421 static int
9422 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9423                                void *key, const struct rte_flow_item *item,
9424                                const struct rte_flow_attr *attr)
9425 {
9426         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9427         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9428         struct mlx5_priv *priv;
9429         uint16_t mask, id;
9430
9431         mask = pid_m ? pid_m->id : 0xffff;
9432         id = pid_v ? pid_v->id : dev->data->port_id;
9433         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9434         if (!priv)
9435                 return -rte_errno;
9436         /*
9437          * Translate to vport field or to metadata, depending on mode.
9438          * Kernel can use either misc.source_port or half of C0 metadata
9439          * register.
9440          */
9441         if (priv->vport_meta_mask) {
9442                 /*
9443                  * Provide the hint for SW steering library
9444                  * to insert the flow into ingress domain and
9445                  * save the extra vport match.
9446                  */
9447                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9448                     priv->pf_bond < 0 && attr->transfer)
9449                         flow_dv_translate_item_source_vport
9450                                 (matcher, key, priv->vport_id, mask);
9451                 /*
9452                  * We should always set the vport metadata register,
9453                  * otherwise the SW steering library can drop
9454                  * the rule if wire vport metadata value is not zero,
9455                  * it depends on kernel configuration.
9456                  */
9457                 flow_dv_translate_item_meta_vport(matcher, key,
9458                                                   priv->vport_meta_tag,
9459                                                   priv->vport_meta_mask);
9460         } else {
9461                 flow_dv_translate_item_source_vport(matcher, key,
9462                                                     priv->vport_id, mask);
9463         }
9464         return 0;
9465 }
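
/*
 * Minimal usage sketch (illustrative only): matching traffic coming
 * from the representor behind DPDK port `peer_id` (a hypothetical
 * variable). Depending on the steering mode detected above, the match
 * lands either in the REG_C_0 vport metadata or in misc.source_port.
 *
 *   struct rte_flow_item_port_id pid = { .id = peer_id };
 *   struct rte_flow_item pitem = {
 *           .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *           .spec = &pid,
 *   };
 *
 *   if (flow_dv_translate_item_port_id(dev, matcher, key, &pitem, attr))
 *           return -rte_errno;
 */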
9466
9467 /**
9468  * Add ICMP6 item to matcher and to the value.
9469  *
9470  * @param[in, out] matcher
9471  *   Flow matcher.
9472  * @param[in, out] key
9473  *   Flow matcher value.
9474  * @param[in] item
9475  *   Flow pattern to translate.
9476  * @param[in] inner
9477  *   Item is inner pattern.
9478  */
9479 static void
9480 flow_dv_translate_item_icmp6(void *matcher, void *key,
9481                               const struct rte_flow_item *item,
9482                               int inner)
9483 {
9484         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9485         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9486         void *headers_m;
9487         void *headers_v;
9488         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9489                                      misc_parameters_3);
9490         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9491         if (inner) {
9492                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9493                                          inner_headers);
9494                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9495         } else {
9496                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9497                                          outer_headers);
9498                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9499         }
9500         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9501         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9502         if (!icmp6_v)
9503                 return;
9504         if (!icmp6_m)
9505                 icmp6_m = &rte_flow_item_icmp6_mask;
9506         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9507         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9508                  icmp6_v->type & icmp6_m->type);
9509         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9510         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9511                  icmp6_v->code & icmp6_m->code);
9512 }
9513
9514 /**
9515  * Add ICMP item to matcher and to the value.
9516  *
9517  * @param[in, out] matcher
9518  *   Flow matcher.
9519  * @param[in, out] key
9520  *   Flow matcher value.
9521  * @param[in] item
9522  *   Flow pattern to translate.
9523  * @param[in] inner
9524  *   Item is inner pattern.
9525  */
9526 static void
9527 flow_dv_translate_item_icmp(void *matcher, void *key,
9528                             const struct rte_flow_item *item,
9529                             int inner)
9530 {
9531         const struct rte_flow_item_icmp *icmp_m = item->mask;
9532         const struct rte_flow_item_icmp *icmp_v = item->spec;
9533         uint32_t icmp_header_data_m = 0;
9534         uint32_t icmp_header_data_v = 0;
9535         void *headers_m;
9536         void *headers_v;
9537         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9538                                      misc_parameters_3);
9539         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9540         if (inner) {
9541                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9542                                          inner_headers);
9543                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9544         } else {
9545                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9546                                          outer_headers);
9547                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9548         }
9549         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9550         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9551         if (!icmp_v)
9552                 return;
9553         if (!icmp_m)
9554                 icmp_m = &rte_flow_item_icmp_mask;
9555         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9556                  icmp_m->hdr.icmp_type);
9557         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9558                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9559         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9560                  icmp_m->hdr.icmp_code);
9561         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9562                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9563         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9564         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9565         if (icmp_header_data_m) {
9566                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9567                 icmp_header_data_v |=
9568                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9569                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9570                          icmp_header_data_m);
9571                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9572                          icmp_header_data_v & icmp_header_data_m);
9573         }
9574 }
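
/*
 * Worked example of the icmp_header_data packing above (host byte
 * order does not matter since rte_be_to_cpu_16() normalizes both
 * halves): with identifier 0x1234 and sequence number 0x0001 in the
 * ICMP header, the resulting matcher doubleword is
 *
 *   icmp_header_data = (0x1234 << 16) | 0x0001 = 0x12340001
 *
 * i.e. the identifier occupies the upper 16 bits and the sequence
 * number the lower 16 bits of misc3.icmp_header_data.
 */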
9575
9576 /**
9577  * Add GTP item to matcher and to the value.
9578  *
9579  * @param[in, out] matcher
9580  *   Flow matcher.
9581  * @param[in, out] key
9582  *   Flow matcher value.
9583  * @param[in] item
9584  *   Flow pattern to translate.
9585  * @param[in] inner
9586  *   Item is inner pattern.
9587  */
9588 static void
9589 flow_dv_translate_item_gtp(void *matcher, void *key,
9590                            const struct rte_flow_item *item, int inner)
9591 {
9592         const struct rte_flow_item_gtp *gtp_m = item->mask;
9593         const struct rte_flow_item_gtp *gtp_v = item->spec;
9594         void *headers_m;
9595         void *headers_v;
9596         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9597                                      misc_parameters_3);
9598         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9599         uint16_t dport = RTE_GTPU_UDP_PORT;
9600
9601         if (inner) {
9602                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9603                                          inner_headers);
9604                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9605         } else {
9606                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9607                                          outer_headers);
9608                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9609         }
9610         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9611                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9612                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9613         }
9614         if (!gtp_v)
9615                 return;
9616         if (!gtp_m)
9617                 gtp_m = &rte_flow_item_gtp_mask;
9618         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9619                  gtp_m->v_pt_rsv_flags);
9620         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9621                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9622         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9623         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9624                  gtp_v->msg_type & gtp_m->msg_type);
9625         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9626                  rte_be_to_cpu_32(gtp_m->teid));
9627         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9628                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9629 }
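
/*
 * Minimal usage sketch (illustrative only): matching GTP-U tunnel
 * endpoint ID 0x1234. Note that the function above also forces the
 * outer UDP destination port to 2152 (RTE_GTPU_UDP_PORT) whenever the
 * pattern did not constrain it explicitly.
 *
 *   struct rte_flow_item_gtp spec = { .teid = RTE_BE32(0x1234) };
 *   struct rte_flow_item_gtp mask = { .teid = RTE_BE32(UINT32_MAX) };
 *   struct rte_flow_item gitem = {
 *           .type = RTE_FLOW_ITEM_TYPE_GTP,
 *           .spec = &spec,
 *           .mask = &mask,
 *   };
 *
 *   flow_dv_translate_item_gtp(matcher, key, &gitem, 0);
 */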
9630
9631 /**
9632  * Add GTP PSC item to matcher.
9633  *
9634  * @param[in, out] matcher
9635  *   Flow matcher.
9636  * @param[in, out] key
9637  *   Flow matcher value.
9638  * @param[in] item
9639  *   Flow pattern to translate.
9640  */
9641 static int
9642 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9643                                const struct rte_flow_item *item)
9644 {
9645         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9646         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9647         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9648                         misc_parameters_3);
9649         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9650         union {
9651                 uint32_t w32;
9652                 struct {
9653                         uint16_t seq_num;
9654                         uint8_t npdu_num;
9655                         uint8_t next_ext_header_type;
9656                 };
9657         } dw_2;
9658         uint8_t gtp_flags;
9659
9660         /* Always set E-flag match on one, regardless of GTP item settings. */
9661         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9662         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9663         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9664         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9665         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9666         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9667         /* Set next extension header type. */
9668         dw_2.seq_num = 0;
9669         dw_2.npdu_num = 0;
9670         dw_2.next_ext_header_type = 0xff;
9671         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9672                  rte_cpu_to_be_32(dw_2.w32));
9673         dw_2.seq_num = 0;
9674         dw_2.npdu_num = 0;
9675         dw_2.next_ext_header_type = 0x85;
9676         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9677                  rte_cpu_to_be_32(dw_2.w32));
9678         if (gtp_psc_v) {
9679                 union {
9680                         uint32_t w32;
9681                         struct {
9682                                 uint8_t len;
9683                                 uint8_t type_flags;
9684                                 uint8_t qfi;
9685                                 uint8_t reserved;
9686                         };
9687                 } dw_0;
9688
9689                 /* Set extension header PDU type and QoS. */
9690                 if (!gtp_psc_m)
9691                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9692                 dw_0.w32 = 0;
9693                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9694                 dw_0.qfi = gtp_psc_m->qfi;
9695                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9696                          rte_cpu_to_be_32(dw_0.w32));
9697                 dw_0.w32 = 0;
9698                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9699                                                         gtp_psc_m->pdu_type);
9700                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9701                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9702                          rte_cpu_to_be_32(dw_0.w32));
9703         }
9704         return 0;
9705 }
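
/*
 * Worked example of the dw_2 layout above, assuming a little-endian
 * host: with seq_num = 0, npdu_num = 0 and next_ext_header_type = 0x85
 * the union bytes in memory are {0x00, 0x00, 0x00, 0x85}, so dw_2.w32
 * reads as 0x85000000 and rte_cpu_to_be_32(dw_2.w32) == 0x00000085.
 * The PDU session container type (0x85) therefore lands in the least
 * significant byte of the gtpu_dw_2 field as programmed.
 */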
9706
9707 /**
9708  * Add eCPRI item to matcher and to the value.
9709  *
9710  * @param[in] dev
9711  *   The device to configure through.
9712  * @param[in, out] matcher
9713  *   Flow matcher.
9714  * @param[in, out] key
9715  *   Flow matcher value.
9716  * @param[in] item
9717  *   Flow pattern to translate.
9720  */
9721 static void
9722 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9723                              void *key, const struct rte_flow_item *item)
9724 {
9725         struct mlx5_priv *priv = dev->data->dev_private;
9726         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9727         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9728         struct rte_ecpri_common_hdr common;
9729         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9730                                      misc_parameters_4);
9731         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9732         uint32_t *samples;
9733         void *dw_m;
9734         void *dw_v;
9735
9736         if (!ecpri_v)
9737                 return;
9738         if (!ecpri_m)
9739                 ecpri_m = &rte_flow_item_ecpri_mask;
9740         /*
9741          * At most four DW samples are supported in a single matching now.
9742          * Two of them are used for eCPRI matching:
9743          * 1. Type: one byte, mask should be 0x00ff0000 in network order
9744          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
9745          *    if any.
9746          */
9747         if (!ecpri_m->hdr.common.u32)
9748                 return;
9749         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9750         /* Need to take the whole DW as the mask to fill the entry. */
9751         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9752                             prog_sample_field_value_0);
9753         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9754                             prog_sample_field_value_0);
9755         /* Already big endian (network order) in the header. */
9756         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9757         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9758         /* Sample#0, used for matching type, offset 0. */
9759         MLX5_SET(fte_match_set_misc4, misc4_m,
9760                  prog_sample_field_id_0, samples[0]);
9761         /* It makes no sense to set the sample ID in the mask field. */
9762         MLX5_SET(fte_match_set_misc4, misc4_v,
9763                  prog_sample_field_id_0, samples[0]);
9764         /*
9765          * Check whether the message body part needs to be matched.
9766          * Wildcard rules matching only the type field should be supported.
9767          */
9768         if (ecpri_m->hdr.dummy[0]) {
9769                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9770                 switch (common.type) {
9771                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9772                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9773                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9774                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9775                                             prog_sample_field_value_1);
9776                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9777                                             prog_sample_field_value_1);
9778                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9779                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9780                                             ecpri_m->hdr.dummy[0];
9781                         /* Sample#1, to match message body, offset 4. */
9782                         MLX5_SET(fte_match_set_misc4, misc4_m,
9783                                  prog_sample_field_id_1, samples[1]);
9784                         MLX5_SET(fte_match_set_misc4, misc4_v,
9785                                  prog_sample_field_id_1, samples[1]);
9786                         break;
9787                 default:
9788                         /* Others, do not match any sample ID. */
9789                         break;
9790                 }
9791         }
9792 }
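
/*
 * Minimal usage sketch (illustrative only): a type-only eCPRI match
 * following the mask convention described above. The driver expects
 * the common header already in network order, so one plausible way to
 * build the spec is to fill the host bitfield view and byte-swap it:
 *
 *   struct rte_flow_item_ecpri spec = { 0 };
 *   struct rte_flow_item_ecpri mask = { 0 };
 *   struct rte_flow_item eitem = {
 *           .type = RTE_FLOW_ITEM_TYPE_ECPRI,
 *           .spec = &spec,
 *           .mask = &mask,
 *   };
 *
 *   spec.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
 *   spec.hdr.common.u32 = rte_cpu_to_be_32(spec.hdr.common.u32);
 *   mask.hdr.common.u32 = RTE_BE32(0x00ff0000);
 *   flow_dv_translate_item_ecpri(dev, matcher, key, &eitem);
 */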
9793
9794 /**
9795  * Add connection tracking status item to the matcher and to the value.
9796  *
9797  * @param[in] dev
9798  *   The device to configure through.
9799  * @param[in, out] matcher
9800  *   Flow matcher.
9801  * @param[in, out] key
9802  *   Flow matcher value.
9803  * @param[in] item
9804  *   Flow pattern to translate.
9805  */
9806 static void
9807 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
9808                               void *matcher, void *key,
9809                               const struct rte_flow_item *item)
9810 {
9811         uint32_t reg_value = 0;
9812         int reg_id;
9813         /* 8LSB 0b 11/0000/11, middle 4 bits are reserved. */
9814         uint32_t reg_mask = 0;
9815         const struct rte_flow_item_conntrack *spec = item->spec;
9816         const struct rte_flow_item_conntrack *mask = item->mask;
9817         uint32_t flags;
9818         struct rte_flow_error error;
9819
9820         if (!mask)
9821                 mask = &rte_flow_item_conntrack_mask;
9822         if (!spec || !mask->flags)
9823                 return;
9824         flags = spec->flags & mask->flags;
9825         /* Conflicts should have been checked during validation. */
9826         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
9827                 reg_value |= MLX5_CT_SYNDROME_VALID;
9828         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9829                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
9830         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
9831                 reg_value |= MLX5_CT_SYNDROME_INVALID;
9832         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
9833                 reg_value |= MLX5_CT_SYNDROME_TRAP;
9834         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9835                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
9836         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
9837                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
9838                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
9839                 reg_mask |= 0xc0;
9840         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9841                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
9842         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9843                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
9844         /* The REG_C_x value could be saved during startup. */
9845         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
9846         if (reg_id == REG_NON)
9847                 return;
9848         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
9849                                reg_value, reg_mask);
9850 }
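
/*
 * Worked example of the mapping above: a pattern requesting only
 * "valid" packets, i.e. both spec->flags and mask->flags equal to
 * RTE_FLOW_CONNTRACK_PKT_STATE_VALID, yields
 * reg_value = MLX5_CT_SYNDROME_VALID and reg_mask = 0xc0 (the two most
 * significant syndrome bits), matched on the REG_C_x register that was
 * reserved for ASO connection tracking at startup.
 */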
9851
9852 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9853
9854 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9855         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9856                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9857
9858 /**
9859  * Calculate flow matcher enable bitmap.
9860  *
9861  * @param match_criteria
9862  *   Pointer to flow matcher criteria.
9863  *
9864  * @return
9865  *   Bitmap of enabled fields.
9866  */
9867 static uint8_t
9868 flow_dv_matcher_enable(uint32_t *match_criteria)
9869 {
9870         uint8_t match_criteria_enable;
9871
9872         match_criteria_enable =
9873                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9874                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9875         match_criteria_enable |=
9876                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9877                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9878         match_criteria_enable |=
9879                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9880                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9881         match_criteria_enable |=
9882                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9883                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9884         match_criteria_enable |=
9885                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9886                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9887         match_criteria_enable |=
9888                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9889                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9890         return match_criteria_enable;
9891 }
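
/*
 * Example (illustrative): for a matcher mask that touches only the
 * outer headers and misc_parameters_3 (e.g. an outer ICMP or GTP
 * match), the function above returns
 *
 *   (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *   (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT)
 *
 * so the hardware can skip all the untouched match sections.
 */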
9892
9893 struct mlx5_hlist_entry *
9894 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
9895 {
9896         struct mlx5_dev_ctx_shared *sh = list->ctx;
9897         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9898         struct rte_eth_dev *dev = ctx->dev;
9899         struct mlx5_flow_tbl_data_entry *tbl_data;
9900         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
9901         struct rte_flow_error *error = ctx->error;
9902         union mlx5_flow_tbl_key key = { .v64 = key64 };
9903         struct mlx5_flow_tbl_resource *tbl;
9904         void *domain;
9905         uint32_t idx = 0;
9906         int ret;
9907
9908         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
9909         if (!tbl_data) {
9910                 rte_flow_error_set(error, ENOMEM,
9911                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9912                                    NULL,
9913                                    "cannot allocate flow table data entry");
9914                 return NULL;
9915         }
9916         tbl_data->idx = idx;
9917         tbl_data->tunnel = tt_prm->tunnel;
9918         tbl_data->group_id = tt_prm->group_id;
9919         tbl_data->external = !!tt_prm->external;
9920         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
9921         tbl_data->is_egress = !!key.is_egress;
9922         tbl_data->is_transfer = !!key.is_fdb;
9923         tbl_data->dummy = !!key.dummy;
9924         tbl_data->level = key.level;
9925         tbl_data->id = key.id;
9926         tbl = &tbl_data->tbl;
9927         if (key.dummy)
9928                 return &tbl_data->entry;
9929         if (key.is_fdb)
9930                 domain = sh->fdb_domain;
9931         else if (key.is_egress)
9932                 domain = sh->tx_domain;
9933         else
9934                 domain = sh->rx_domain;
9935         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
9936         if (ret) {
9937                 rte_flow_error_set(error, ENOMEM,
9938                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9939                                    NULL, "cannot create flow table object");
9940                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9941                 return NULL;
9942         }
9943         if (key.level != 0) {
9944                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9945                                         (tbl->obj, &tbl_data->jump.action);
9946                 if (ret) {
9947                         rte_flow_error_set(error, ENOMEM,
9948                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9949                                            NULL,
9950                                            "cannot create flow jump action");
9951                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
9952                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
9953                         return NULL;
9954                 }
9955         }
9956         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
9957               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
9958               key.level, key.id);
9959         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
9960                              flow_dv_matcher_create_cb,
9961                              flow_dv_matcher_match_cb,
9962                              flow_dv_matcher_remove_cb);
9963         return &tbl_data->entry;
9964 }
9965
9966 int
9967 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
9968                      struct mlx5_hlist_entry *entry, uint64_t key64,
9969                      void *cb_ctx __rte_unused)
9970 {
9971         struct mlx5_flow_tbl_data_entry *tbl_data =
9972                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
9973         union mlx5_flow_tbl_key key = { .v64 = key64 };
9974
9975         return tbl_data->level != key.level ||
9976                tbl_data->id != key.id ||
9977                tbl_data->dummy != key.dummy ||
9978                tbl_data->is_transfer != !!key.is_fdb ||
9979                tbl_data->is_egress != !!key.is_egress;
9980 }
9981
9982 /**
9983  * Get a flow table.
9984  *
9985  * @param[in, out] dev
9986  *   Pointer to rte_eth_dev structure.
9987  * @param[in] table_level
9988  *   Table level to use.
9989  * @param[in] egress
9990  *   Direction of the table.
9991  * @param[in] transfer
9992  *   E-Switch or NIC flow.
9993  * @param[in] dummy
9994  *   Dummy entry for dv API.
9995  * @param[in] table_id
9996  *   Table id to use.
9997  * @param[out] error
9998  *   pointer to error structure.
9999  *
10000  * @return
10001  *   Returns the table resource based on the index, NULL in case of failure.
10002  */
10003 struct mlx5_flow_tbl_resource *
10004 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10005                          uint32_t table_level, uint8_t egress,
10006                          uint8_t transfer,
10007                          bool external,
10008                          const struct mlx5_flow_tunnel *tunnel,
10009                          uint32_t group_id, uint8_t dummy,
10010                          uint32_t table_id,
10011                          struct rte_flow_error *error)
10012 {
10013         struct mlx5_priv *priv = dev->data->dev_private;
10014         union mlx5_flow_tbl_key table_key = {
10015                 {
10016                         .level = table_level,
10017                         .id = table_id,
10018                         .reserved = 0,
10019                         .dummy = !!dummy,
10020                         .is_fdb = !!transfer,
10021                         .is_egress = !!egress,
10022                 }
10023         };
10024         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10025                 .tunnel = tunnel,
10026                 .group_id = group_id,
10027                 .external = external,
10028         };
10029         struct mlx5_flow_cb_ctx ctx = {
10030                 .dev = dev,
10031                 .error = error,
10032                 .data = &tt_prm,
10033         };
10034         struct mlx5_hlist_entry *entry;
10035         struct mlx5_flow_tbl_data_entry *tbl_data;
10036
10037         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10038         if (!entry) {
10039                 rte_flow_error_set(error, ENOMEM,
10040                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10041                                    "cannot get table");
10042                 return NULL;
10043         }
10044         DRV_LOG(DEBUG, "table_level %u table_id %u "
10045                 "tunnel %u group %u registered.",
10046                 table_level, table_id,
10047                 tunnel ? tunnel->tunnel_id : 0, group_id);
10048         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10049         return &tbl_data->tbl;
10050 }
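
/*
 * Minimal usage sketch (illustrative only; `error` is an assumed
 * struct rte_flow_error): get or create the NIC ingress table of
 * level 1, non-tunnel, with table id 0:
 *
 *   struct mlx5_flow_tbl_resource *tbl =
 *           flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
 *                                    0, 0, 0, &error);
 *
 *   if (!tbl)
 *           return -rte_errno;
 *
 * The entry is reference counted: every successful get must be paired
 * with a flow_dv_tbl_resource_release() call.
 */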
10051
10052 void
10053 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
10054                       struct mlx5_hlist_entry *entry)
10055 {
10056         struct mlx5_dev_ctx_shared *sh = list->ctx;
10057         struct mlx5_flow_tbl_data_entry *tbl_data =
10058                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10059
10060         MLX5_ASSERT(entry && sh);
10061         if (tbl_data->jump.action)
10062                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10063         if (tbl_data->tbl.obj)
10064                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10065         if (tbl_data->tunnel_offload && tbl_data->external) {
10066                 struct mlx5_hlist_entry *he;
10067                 struct mlx5_hlist *tunnel_grp_hash;
10068                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10069                 union tunnel_tbl_key tunnel_key = {
10070                         .tunnel_id = tbl_data->tunnel ?
10071                                         tbl_data->tunnel->tunnel_id : 0,
10072                         .group = tbl_data->group_id
10073                 };
10074                 uint32_t table_level = tbl_data->level;
10075
10076                 tunnel_grp_hash = tbl_data->tunnel ?
10077                                         tbl_data->tunnel->groups :
10078                                         thub->groups;
10079                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
10080                 if (he)
10081                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10082                 DRV_LOG(DEBUG,
10083                         "table_level %u id %u tunnel %u group %u released.",
10084                         table_level,
10085                         tbl_data->id,
10086                         tbl_data->tunnel ?
10087                         tbl_data->tunnel->tunnel_id : 0,
10088                         tbl_data->group_id);
10089         }
10090         mlx5_cache_list_destroy(&tbl_data->matchers);
10091         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10092 }
10093
10094 /**
10095  * Release a flow table.
10096  *
10097  * @param[in] sh
10098  *   Pointer to device shared structure.
10099  * @param[in] tbl
10100  *   Table resource to be released.
10101  *
10102  * @return
10103  *   Returns 0 if the table was released, 1 otherwise.
10104  */
10105 static int
10106 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10107                              struct mlx5_flow_tbl_resource *tbl)
10108 {
10109         struct mlx5_flow_tbl_data_entry *tbl_data =
10110                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10111
10112         if (!tbl)
10113                 return 0;
10114         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10115 }
10116
10117 int
10118 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
10119                          struct mlx5_cache_entry *entry, void *cb_ctx)
10120 {
10121         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10122         struct mlx5_flow_dv_matcher *ref = ctx->data;
10123         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10124                                                         entry);
10125
10126         return cur->crc != ref->crc ||
10127                cur->priority != ref->priority ||
10128                memcmp((const void *)cur->mask.buf,
10129                       (const void *)ref->mask.buf, ref->mask.size);
10130 }
10131
10132 struct mlx5_cache_entry *
10133 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
10134                           struct mlx5_cache_entry *entry __rte_unused,
10135                           void *cb_ctx)
10136 {
10137         struct mlx5_dev_ctx_shared *sh = list->ctx;
10138         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10139         struct mlx5_flow_dv_matcher *ref = ctx->data;
10140         struct mlx5_flow_dv_matcher *cache;
10141         struct mlx5dv_flow_matcher_attr dv_attr = {
10142                 .type = IBV_FLOW_ATTR_NORMAL,
10143                 .match_mask = (void *)&ref->mask,
10144         };
10145         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10146                                                             typeof(*tbl), tbl);
10147         int ret;
10148
10149         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
10150         if (!cache) {
10151                 rte_flow_error_set(ctx->error, ENOMEM,
10152                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10153                                    "cannot create matcher");
10154                 return NULL;
10155         }
10156         *cache = *ref;
10157         dv_attr.match_criteria_enable =
10158                 flow_dv_matcher_enable(cache->mask.buf);
10159         dv_attr.priority = ref->priority;
10160         if (tbl->is_egress)
10161                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10162         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10163                                                &cache->matcher_object);
10164         if (ret) {
10165                 mlx5_free(cache);
10166                 rte_flow_error_set(ctx->error, ENOMEM,
10167                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10168                                    "cannot create matcher");
10169                 return NULL;
10170         }
10171         return &cache->entry;
10172 }
10173
10174 /**
10175  * Register the flow matcher.
10176  *
10177  * @param[in, out] dev
10178  *   Pointer to rte_eth_dev structure.
10179  * @param[in, out] ref
10180  *   Pointer to the flow matcher reference.
10181  * @param[in, out] key
10182  *   Pointer to flow table key.
10183  * @param[in, out] dev_flow
10184  *   Pointer to the dev_flow.
 * @param[in] tunnel
 *   Pointer to the tunnel offload data, NULL for a non-tunnel flow.
 * @param[in] group_id
 *   Flow group id.
10185  * @param[out] error
10186  *   pointer to error structure.
10187  *
10188  * @return
10189  *   0 on success, otherwise -errno and errno is set.
10190  */
10191 static int
10192 flow_dv_matcher_register(struct rte_eth_dev *dev,
10193                          struct mlx5_flow_dv_matcher *ref,
10194                          union mlx5_flow_tbl_key *key,
10195                          struct mlx5_flow *dev_flow,
10196                          const struct mlx5_flow_tunnel *tunnel,
10197                          uint32_t group_id,
10198                          struct rte_flow_error *error)
10199 {
10200         struct mlx5_cache_entry *entry;
10201         struct mlx5_flow_dv_matcher *cache;
10202         struct mlx5_flow_tbl_resource *tbl;
10203         struct mlx5_flow_tbl_data_entry *tbl_data;
10204         struct mlx5_flow_cb_ctx ctx = {
10205                 .error = error,
10206                 .data = ref,
10207         };
10208
10209         /*
10210          * The tunnel offload API requires this registration for cases when
10211          * a tunnel match rule was inserted before the tunnel set rule.
10212          */
10213         tbl = flow_dv_tbl_resource_get(dev, key->level,
10214                                        key->is_egress, key->is_fdb,
10215                                        dev_flow->external, tunnel,
10216                                        group_id, 0, key->id, error);
10217         if (!tbl)
10218                 return -rte_errno;      /* No need to refill the error info */
10219         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10220         ref->tbl = tbl;
10221         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
10222         if (!entry) {
10223                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10224                 return rte_flow_error_set(error, ENOMEM,
10225                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10226                                           "cannot allocate ref memory");
10227         }
10228         cache = container_of(entry, typeof(*cache), entry);
10229         dev_flow->handle->dvh.matcher = cache;
10230         return 0;
10231 }
10232
10233 struct mlx5_hlist_entry *
10234 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
10235 {
10236         struct mlx5_dev_ctx_shared *sh = list->ctx;
10237         struct rte_flow_error *error = ctx;
10238         struct mlx5_flow_dv_tag_resource *entry;
10239         uint32_t idx = 0;
10240         int ret;
10241
10242         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10243         if (!entry) {
10244                 rte_flow_error_set(error, ENOMEM,
10245                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10246                                    "cannot allocate resource memory");
10247                 return NULL;
10248         }
10249         entry->idx = idx;
10250         entry->tag_id = key;
10251         ret = mlx5_flow_os_create_flow_action_tag(key,
10252                                                   &entry->action);
10253         if (ret) {
10254                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10255                 rte_flow_error_set(error, ENOMEM,
10256                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10257                                    NULL, "cannot create action");
10258                 return NULL;
10259         }
10260         return &entry->entry;
10261 }
10262
10263 int
10264 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
10265                      struct mlx5_hlist_entry *entry, uint64_t key,
10266                      void *cb_ctx __rte_unused)
10267 {
10268         struct mlx5_flow_dv_tag_resource *tag =
10269                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10270
10271         return key != tag->tag_id;
10272 }
10273
10274 /**
10275  * Find existing tag resource or create and register a new one.
10276  *
10277  * @param[in, out] dev
10278  *   Pointer to rte_eth_dev structure.
10279  * @param[in, out] tag_be24
10280  *   Tag value in big endian, right-shifted by 8 bits.
10281  * @param[in, out] dev_flow
10282  *   Pointer to the dev_flow.
10283  * @param[out] error
10284  *   pointer to error structure.
10285  *
10286  * @return
10287  *   0 on success, otherwise -errno and errno is set.
10288  */
10289 static int
10290 flow_dv_tag_resource_register
10291                         (struct rte_eth_dev *dev,
10292                          uint32_t tag_be24,
10293                          struct mlx5_flow *dev_flow,
10294                          struct rte_flow_error *error)
10295 {
10296         struct mlx5_priv *priv = dev->data->dev_private;
10297         struct mlx5_flow_dv_tag_resource *cache_resource;
10298         struct mlx5_hlist_entry *entry;
10299
10300         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
10301         if (entry) {
10302                 cache_resource = container_of
10303                         (entry, struct mlx5_flow_dv_tag_resource, entry);
10304                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
10305                 dev_flow->dv.tag_resource = cache_resource;
10306                 return 0;
10307         }
10308         return -rte_errno;
10309 }
10310
10311 void
10312 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
10313                       struct mlx5_hlist_entry *entry)
10314 {
10315         struct mlx5_dev_ctx_shared *sh = list->ctx;
10316         struct mlx5_flow_dv_tag_resource *tag =
10317                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10318
10319         MLX5_ASSERT(tag && sh && tag->action);
10320         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10321         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10322         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10323 }
10324
10325 /**
10326  * Release the tag.
10327  *
10328  * @param dev
10329  *   Pointer to Ethernet device.
10330  * @param tag_idx
10331  *   Tag index.
10332  *
10333  * @return
10334  *   1 while a reference on it exists, 0 when freed.
10335  */
10336 static int
10337 flow_dv_tag_release(struct rte_eth_dev *dev,
10338                     uint32_t tag_idx)
10339 {
10340         struct mlx5_priv *priv = dev->data->dev_private;
10341         struct mlx5_flow_dv_tag_resource *tag;
10342
10343         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10344         if (!tag)
10345                 return 0;
10346         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10347                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10348         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10349 }
10350
10351 /**
10352  * Translate port ID action to vport.
10353  *
10354  * @param[in] dev
10355  *   Pointer to rte_eth_dev structure.
10356  * @param[in] action
10357  *   Pointer to the port ID action.
10358  * @param[out] dst_port_id
10359  *   The target port ID.
10360  * @param[out] error
10361  *   Pointer to the error structure.
10362  *
10363  * @return
10364  *   0 on success, a negative errno value otherwise and rte_errno is set.
10365  */
10366 static int
10367 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10368                                  const struct rte_flow_action *action,
10369                                  uint32_t *dst_port_id,
10370                                  struct rte_flow_error *error)
10371 {
10372         uint32_t port;
10373         struct mlx5_priv *priv;
10374         const struct rte_flow_action_port_id *conf =
10375                         (const struct rte_flow_action_port_id *)action->conf;
10376
10377         port = conf->original ? dev->data->port_id : conf->id;
10378         priv = mlx5_port_to_eswitch_info(port, false);
10379         if (!priv)
10380                 return rte_flow_error_set(error, -rte_errno,
10381                                           RTE_FLOW_ERROR_TYPE_ACTION,
10382                                           NULL,
10383                                           "No eswitch info was found for port");
10384 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
10385         /*
10386          * This parameter is transferred to
10387          * mlx5dv_dr_action_create_dest_ib_port().
10388          */
10389         *dst_port_id = priv->dev_port;
10390 #else
10391         /*
10392          * Legacy mode, no LAG configuration is supported.
10393          * This parameter is transferred to
10394          * mlx5dv_dr_action_create_dest_vport().
10395          */
10396         *dst_port_id = priv->vport_id;
10397 #endif
10398         return 0;
10399 }
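
/*
 * Minimal usage sketch (illustrative only; `peer_id` is a hypothetical
 * DPDK port sharing the same E-Switch):
 *
 *   const struct rte_flow_action_port_id conf = { .id = peer_id };
 *   const struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *           .conf = &conf,
 *   };
 *   uint32_t dst_port;
 *
 *   if (flow_dv_translate_action_port_id(dev, &action, &dst_port, &error))
 *           return -rte_errno;
 */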
10400
10401 /**
10402  * Create a counter with aging configuration.
10403  *
10404  * @param[in] dev
10405  *   Pointer to rte_eth_dev structure.
10406  * @param[in] dev_flow
10407  *   Pointer to the mlx5_flow.
10408  * @param[out] count
10409  *   Pointer to the counter action configuration.
10410  * @param[in] age
10411  *   Pointer to the aging action configuration.
10412  *
10413  * @return
10414  *   Index to flow counter on success, 0 otherwise.
10415  */
10416 static uint32_t
10417 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10418                                 struct mlx5_flow *dev_flow,
10419                                 const struct rte_flow_action_count *count,
10420                                 const struct rte_flow_action_age *age)
10421 {
10422         uint32_t counter;
10423         struct mlx5_age_param *age_param;
10424
10425         if (count && count->shared)
10426                 counter = flow_dv_counter_get_shared(dev, count->id);
10427         else
10428                 counter = flow_dv_counter_alloc(dev, !!age);
10429         if (!counter || age == NULL)
10430                 return counter;
10431         age_param = flow_dv_counter_idx_get_age(dev, counter);
10432         age_param->context = age->context ? age->context :
10433                 (void *)(uintptr_t)(dev_flow->flow_idx);
10434         age_param->timeout = age->timeout;
10435         age_param->port_id = dev->data->port_id;
10436         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10437         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10438         return counter;
10439 }
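
/*
 * Minimal usage sketch (illustrative only): a non-shared counter with
 * a 10 second aging timeout; when the application passes no context,
 * the flow index itself becomes the aging context:
 *
 *   struct rte_flow_action_count cnt = { .shared = 0, .id = 0 };
 *   struct rte_flow_action_age age = { .timeout = 10, .context = NULL };
 *   uint32_t cidx;
 *
 *   cidx = flow_dv_translate_create_counter(dev, dev_flow, &cnt, &age);
 *   if (!cidx)
 *           goto error;
 */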
10440
10441 /**
10442  * Add Tx queue matcher
10443  *
10444  * @param[in] dev
10445  *   Pointer to the dev struct.
10446  * @param[in, out] matcher
10447  *   Flow matcher.
10448  * @param[in, out] key
10449  *   Flow matcher value.
10450  * @param[in] item
10451  *   Flow pattern to translate.
10452  * @param[in] inner
10453  *   Item is inner pattern.
10454  */
10455 static void
10456 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10457                                 void *matcher, void *key,
10458                                 const struct rte_flow_item *item)
10459 {
10460         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10461         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10462         void *misc_m =
10463                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10464         void *misc_v =
10465                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10466         struct mlx5_txq_ctrl *txq;
10467         uint32_t queue;
10468
10470         queue_m = (const void *)item->mask;
10471         if (!queue_m)
10472                 return;
10473         queue_v = (const void *)item->spec;
10474         if (!queue_v)
10475                 return;
10476         txq = mlx5_txq_get(dev, queue_v->queue);
10477         if (!txq)
10478                 return;
10479         queue = txq->obj->sq->id;
10480         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10481         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10482                  queue & queue_m->queue);
10483         mlx5_txq_release(dev, queue_v->queue);
10484 }
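
/*
 * Minimal usage sketch (illustrative only): this internal item is
 * created by the PMD itself (e.g. for hairpin or Tx metadata flows);
 * the Tx queue index is translated to the underlying SQ number:
 *
 *   struct mlx5_rte_flow_item_tx_queue spec = { .queue = 2 };
 *   struct mlx5_rte_flow_item_tx_queue mask = { .queue = UINT32_MAX };
 *   struct rte_flow_item titem = {
 *           .type = (enum rte_flow_item_type)
 *                   MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
 *           .spec = &spec,
 *           .mask = &mask,
 *   };
 *
 *   flow_dv_translate_item_tx_queue(dev, matcher, key, &titem);
 */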
10485
10486 /**
10487  * Set the hash fields according to the @p flow information.
10488  *
10489  * @param[in] dev_flow
10490  *   Pointer to the mlx5_flow.
10491  * @param[in] rss_desc
10492  *   Pointer to the mlx5_flow_rss_desc.
10493  */
10494 static void
10495 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10496                        struct mlx5_flow_rss_desc *rss_desc)
10497 {
10498         uint64_t items = dev_flow->handle->layers;
10499         int rss_inner = 0;
10500         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10501
10502         dev_flow->hash_fields = 0;
10503 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10504         if (rss_desc->level >= 2) {
10505                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10506                 rss_inner = 1;
10507         }
10508 #endif
10509         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10510             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10511                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10512                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10513                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10514                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10515                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10516                         else
10517                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10518                 }
10519         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10520                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10521                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10522                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10523                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10524                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10525                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10526                         else
10527                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10528                 }
10529         }
10530         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10531             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10532                 if (rss_types & ETH_RSS_UDP) {
10533                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10534                                 dev_flow->hash_fields |=
10535                                                 IBV_RX_HASH_SRC_PORT_UDP;
10536                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10537                                 dev_flow->hash_fields |=
10538                                                 IBV_RX_HASH_DST_PORT_UDP;
10539                         else
10540                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10541                 }
10542         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10543                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10544                 if (rss_types & ETH_RSS_TCP) {
10545                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10546                                 dev_flow->hash_fields |=
10547                                                 IBV_RX_HASH_SRC_PORT_TCP;
10548                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10549                                 dev_flow->hash_fields |=
10550                                                 IBV_RX_HASH_DST_PORT_TCP;
10551                         else
10552                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10553                 }
10554         }
10555 }
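
/*
 * Example (illustrative): for a sub-flow whose detected layers are
 * outer IPv4 + UDP and an RSS descriptor requesting ETH_RSS_IP |
 * ETH_RSS_UDP without any L3/L4 SRC/DST_ONLY restriction, the function
 * above sets
 *
 *   dev_flow->hash_fields = MLX5_IPV4_IBV_RX_HASH | MLX5_UDP_IBV_RX_HASH;
 *
 * i.e. hashing on both IPv4 addresses and both UDP ports.
 */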
10556
10557 /**
10558  * Prepare an Rx Hash queue.
10559  *
10560  * @param dev
10561  *   Pointer to Ethernet device.
10562  * @param[in] dev_flow
10563  *   Pointer to the mlx5_flow.
10564  * @param[in] rss_desc
10565  *   Pointer to the mlx5_flow_rss_desc.
10566  * @param[out] hrxq_idx
10567  *   Hash Rx queue index.
10568  *
10569  * @return
10570  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10571  */
10572 static struct mlx5_hrxq *
10573 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10574                      struct mlx5_flow *dev_flow,
10575                      struct mlx5_flow_rss_desc *rss_desc,
10576                      uint32_t *hrxq_idx)
10577 {
10578         struct mlx5_priv *priv = dev->data->dev_private;
10579         struct mlx5_flow_handle *dh = dev_flow->handle;
10580         struct mlx5_hrxq *hrxq;
10581
10582         MLX5_ASSERT(rss_desc->queue_num);
10583         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10584         rss_desc->hash_fields = dev_flow->hash_fields;
10585         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10586         rss_desc->shared_rss = 0;
10587         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10588         if (!*hrxq_idx)
10589                 return NULL;
10590         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10591                               *hrxq_idx);
10592         return hrxq;
10593 }
10594
10595 /**
10596  * Release sample sub action resource.
10597  *
10598  * @param[in, out] dev
10599  *   Pointer to rte_eth_dev structure.
10600  * @param[in] act_res
10601  *   Pointer to sample sub action resource.
10602  */
10603 static void
10604 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10605                                    struct mlx5_flow_sub_actions_idx *act_res)
10606 {
10607         if (act_res->rix_hrxq) {
10608                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10609                 act_res->rix_hrxq = 0;
10610         }
10611         if (act_res->rix_encap_decap) {
10612                 flow_dv_encap_decap_resource_release(dev,
10613                                                      act_res->rix_encap_decap);
10614                 act_res->rix_encap_decap = 0;
10615         }
10616         if (act_res->rix_port_id_action) {
10617                 flow_dv_port_id_action_resource_release(dev,
10618                                                 act_res->rix_port_id_action);
10619                 act_res->rix_port_id_action = 0;
10620         }
10621         if (act_res->rix_tag) {
10622                 flow_dv_tag_release(dev, act_res->rix_tag);
10623                 act_res->rix_tag = 0;
10624         }
10625         if (act_res->rix_jump) {
10626                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10627                 act_res->rix_jump = 0;
10628         }
10629 }
10630
10631 int
10632 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
10633                         struct mlx5_cache_entry *entry, void *cb_ctx)
10634 {
10635         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10636         struct rte_eth_dev *dev = ctx->dev;
10637         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10638         struct mlx5_flow_dv_sample_resource *cache_resource =
10639                         container_of(entry, typeof(*cache_resource), entry);
10640
10641         if (resource->ratio == cache_resource->ratio &&
10642             resource->ft_type == cache_resource->ft_type &&
10643             resource->ft_id == cache_resource->ft_id &&
10644             resource->set_action == cache_resource->set_action &&
10645             !memcmp((void *)&resource->sample_act,
10646                     (void *)&cache_resource->sample_act,
10647                     sizeof(struct mlx5_flow_sub_actions_list))) {
10648                 /*
10649                  * Existing sample action should release the prepared
10650                  * sub-actions reference counter.
10651                  */
10652                 flow_dv_sample_sub_actions_release(dev,
10653                                                 &resource->sample_idx);
10654                 return 0;
10655         }
10656         return 1;
10657 }
10658
10659 struct mlx5_cache_entry *
10660 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
10661                          struct mlx5_cache_entry *entry __rte_unused,
10662                          void *cb_ctx)
10663 {
10664         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10665         struct rte_eth_dev *dev = ctx->dev;
10666         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
10667         void **sample_dv_actions = resource->sub_actions;
10668         struct mlx5_flow_dv_sample_resource *cache_resource;
10669         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10670         struct mlx5_priv *priv = dev->data->dev_private;
10671         struct mlx5_dev_ctx_shared *sh = priv->sh;
10672         struct mlx5_flow_tbl_resource *tbl;
10673         uint32_t idx = 0;
10674         const uint32_t next_ft_step = 1;
10675         uint32_t next_ft_id = resource->ft_id + next_ft_step;
10676         uint8_t is_egress = 0;
10677         uint8_t is_transfer = 0;
10678         struct rte_flow_error *error = ctx->error;
10679
10680         /* Register new sample resource. */
10681         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10682         if (!cache_resource) {
10683                 rte_flow_error_set(error, ENOMEM,
10684                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10685                                           NULL,
10686                                           "cannot allocate resource memory");
10687                 return NULL;
10688         }
10689         *cache_resource = *resource;
10690         /* Create normal path table level */
10691         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10692                 is_transfer = 1;
10693         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10694                 is_egress = 1;
10695         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10696                                         is_egress, is_transfer,
10697                                         true, NULL, 0, 0, 0, error);
10698         if (!tbl) {
10699                 rte_flow_error_set(error, ENOMEM,
10700                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10701                                           NULL,
10702                                           "fail to create normal path table "
10703                                           "for sample");
10704                 goto error;
10705         }
10706         cache_resource->normal_path_tbl = tbl;
10707         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10708                 if (!sh->default_miss_action) {
10709                         rte_flow_error_set(error, ENOMEM,
10710                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10711                                                 NULL,
10712                                                 "default miss action was not "
10713                                                 "created");
10714                         goto error;
10715                 }
10716                 sample_dv_actions[resource->sample_act.actions_num++] =
10717                                                 sh->default_miss_action;
10718         }
10719         /* Create a DR sample action */
10720         sampler_attr.sample_ratio = cache_resource->ratio;
10721         sampler_attr.default_next_table = tbl->obj;
10722         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
10723         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
10724                                                         &sample_dv_actions[0];
10725         sampler_attr.action = cache_resource->set_action;
10726         if (mlx5_os_flow_dr_create_flow_action_sampler
10727                         (&sampler_attr, &cache_resource->verbs_action)) {
10728                 rte_flow_error_set(error, ENOMEM,
10729                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10730                                         NULL, "cannot create sample action");
10731                 goto error;
10732         }
10733         cache_resource->idx = idx;
10734         cache_resource->dev = dev;
10735         return &cache_resource->entry;
10736 error:
10737         if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
10738                 flow_dv_sample_sub_actions_release(dev,
10739                                                    &cache_resource->sample_idx);
10740         if (cache_resource->normal_path_tbl)
10741                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10742                                 cache_resource->normal_path_tbl);
10743         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10744         return NULL;
10746 }
10747
10748 /**
10749  * Find existing sample resource or create and register a new one.
10750  *
10751  * @param[in, out] dev
10752  *   Pointer to rte_eth_dev structure.
10753  * @param[in] resource
10754  *   Pointer to sample resource.
10755  * @param[in, out] dev_flow
10756  *   Pointer to the dev_flow.
10757  * @param[out] error
10758  *   Pointer to the error structure.
10759  *
10760  * @return
10761  *   0 on success, otherwise -rte_errno and rte_errno is set.
10762  */
10763 static int
10764 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10765                          struct mlx5_flow_dv_sample_resource *resource,
10766                          struct mlx5_flow *dev_flow,
10767                          struct rte_flow_error *error)
10768 {
10769         struct mlx5_flow_dv_sample_resource *cache_resource;
10770         struct mlx5_cache_entry *entry;
10771         struct mlx5_priv *priv = dev->data->dev_private;
10772         struct mlx5_flow_cb_ctx ctx = {
10773                 .dev = dev,
10774                 .error = error,
10775                 .data = resource,
10776         };
10777
10778         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
10779         if (!entry)
10780                 return -rte_errno;
10781         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10782         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
10783         dev_flow->dv.sample_res = cache_resource;
10784         return 0;
10785 }
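
/*
 * Editor's illustrative sketch (not part of the driver): the helper
 * above relies on the generic cache-list contract, where
 * mlx5_cache_register() walks the list calling the match callback and
 * invokes the create callback only on a miss. A hypothetical caller
 * fills just the resource template; values below are placeholders.
 */
#if 0 /* example only */
	struct mlx5_flow_dv_sample_resource tmpl = {
		.ratio = 2,	/* sample one of every two packets */
		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
		/* ft_id, sub_actions, etc. set by the translate step */
	};

	if (flow_dv_sample_resource_register(dev, &tmpl, dev_flow, error))
		return -rte_errno;	/* rte_errno set by the callbacks */
#endif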
10786
10787 int
10788 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
10789                             struct mlx5_cache_entry *entry, void *cb_ctx)
10790 {
10791         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10792         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10793         struct rte_eth_dev *dev = ctx->dev;
10794         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10795                         container_of(entry, typeof(*cache_resource), entry);
10796         uint32_t idx = 0;
10797
10798         if (resource->num_of_dest == cache_resource->num_of_dest &&
10799             resource->ft_type == cache_resource->ft_type &&
10800             !memcmp((void *)cache_resource->sample_act,
10801                     (void *)resource->sample_act,
10802                    (resource->num_of_dest *
10803                    sizeof(struct mlx5_flow_sub_actions_list)))) {
10804                 /*
10805                  * Existing sample action should release the prepared
10806                  * sub-actions reference counter.
10807                  */
10808                 for (idx = 0; idx < resource->num_of_dest; idx++)
10809                         flow_dv_sample_sub_actions_release(dev,
10810                                         &resource->sample_idx[idx]);
10811                 return 0;
10812         }
10813         return 1;
10814 }
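
/*
 * Editor's note: the match callback returns 0 for "equal" and non-zero
 * for "different", mirroring memcmp(). A minimal sketch of how a cache
 * list consumes such callbacks (simplified; internal names are
 * hypothetical, the real list also handles locking and generations):
 */
#if 0 /* example only */
	LIST_FOREACH(entry, &list->head, next)
		if (!flow_dv_dest_array_match_cb(list, entry, &ctx))
			return entry; /* reuse; refcounts already fixed up */
	return flow_dv_dest_array_create_cb(list, NULL, &ctx);
#endif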
10815
10816 struct mlx5_cache_entry *
10817 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
10818                          struct mlx5_cache_entry *entry __rte_unused,
10819                          void *cb_ctx)
10820 {
10821         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10822         struct rte_eth_dev *dev = ctx->dev;
10823         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10824         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
10825         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
10826         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
10827         struct mlx5_priv *priv = dev->data->dev_private;
10828         struct mlx5_dev_ctx_shared *sh = priv->sh;
10829         struct mlx5_flow_sub_actions_list *sample_act;
10830         struct mlx5dv_dr_domain *domain;
10831         uint32_t idx = 0, res_idx = 0;
10832         struct rte_flow_error *error = ctx->error;
10833         uint64_t action_flags;
10834         int ret;
10835
10836         /* Register new destination array resource. */
10837         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10838                                             &res_idx);
10839         if (!cache_resource) {
10840                 rte_flow_error_set(error, ENOMEM,
10841                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10842                                           NULL,
10843                                           "cannot allocate resource memory");
10844                 return NULL;
10845         }
10846         *cache_resource = *resource;
10847         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10848                 domain = sh->fdb_domain;
10849         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
10850                 domain = sh->rx_domain;
10851         else
10852                 domain = sh->tx_domain;
10853         for (idx = 0; idx < resource->num_of_dest; idx++) {
10854                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
10855                                  mlx5_malloc(MLX5_MEM_ZERO,
10856                                  sizeof(struct mlx5dv_dr_action_dest_attr),
10857                                  0, SOCKET_ID_ANY);
10858                 if (!dest_attr[idx]) {
10859                         rte_flow_error_set(error, ENOMEM,
10860                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10861                                            NULL,
10862                                            "cannot allocate resource memory");
10863                         goto error;
10864                 }
10865                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
10866                 sample_act = &resource->sample_act[idx];
10867                 action_flags = sample_act->action_flags;
10868                 switch (action_flags) {
10869                 case MLX5_FLOW_ACTION_QUEUE:
10870                         dest_attr[idx]->dest = sample_act->dr_queue_action;
10871                         break;
10872                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
10873                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
10874                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
10875                         dest_attr[idx]->dest_reformat->reformat =
10876                                         sample_act->dr_encap_action;
10877                         dest_attr[idx]->dest_reformat->dest =
10878                                         sample_act->dr_port_id_action;
10879                         break;
10880                 case MLX5_FLOW_ACTION_PORT_ID:
10881                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
10882                         break;
10883                 case MLX5_FLOW_ACTION_JUMP:
10884                         dest_attr[idx]->dest = sample_act->dr_jump_action;
10885                         break;
10886                 default:
10887                         rte_flow_error_set(error, EINVAL,
10888                                            RTE_FLOW_ERROR_TYPE_ACTION,
10889                                            NULL,
10890                                            "unsupported actions type");
10891                         goto error;
10892                 }
10893         }
10894         /* Create a dest array action. */
10895         ret = mlx5_os_flow_dr_create_flow_action_dest_array
10896                                                 (domain,
10897                                                  cache_resource->num_of_dest,
10898                                                  dest_attr,
10899                                                  &cache_resource->action);
10900         if (ret) {
10901                 rte_flow_error_set(error, ENOMEM,
10902                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10903                                    NULL,
10904                                    "cannot create destination array action");
10905                 goto error;
10906         }
10907         cache_resource->idx = res_idx;
10908         cache_resource->dev = dev;
10909         for (idx = 0; idx < resource->num_of_dest; idx++)
10910                 mlx5_free(dest_attr[idx]);
10911         return &cache_resource->entry;
10912 error:
10913         for (idx = 0; idx < resource->num_of_dest; idx++) {
10914                 flow_dv_sample_sub_actions_release(dev,
10915                                 &cache_resource->sample_idx[idx]);
10916                 if (dest_attr[idx])
10917                         mlx5_free(dest_attr[idx]);
10918         }
10919
10920         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
10921         return NULL;
10922 }
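
/*
 * Editor's illustrative sketch: the two dest_attr layouts built in the
 * switch above, side by side. A plain destination carries a single DR
 * action, while the PORT_ID | ENCAP combination goes through the
 * reformat variant (hrxq, encap_action and port_id_action are
 * placeholders):
 */
#if 0 /* example only */
	/* Plain queue destination. */
	dest_attr[0]->type = MLX5DV_DR_ACTION_DEST;
	dest_attr[0]->dest = hrxq->action;
	/* Mirrored port with encapsulation applied on the way out. */
	dest_attr[1]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
	dest_attr[1]->dest_reformat = &dest_reformat[1];
	dest_attr[1]->dest_reformat->reformat = encap_action;
	dest_attr[1]->dest_reformat->dest = port_id_action;
#endif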
10923
10924 /**
10925  * Find existing destination array resource or create and register a new one.
10926  *
10927  * @param[in, out] dev
10928  *   Pointer to rte_eth_dev structure.
10929  * @param[in] resource
10930  *   Pointer to destination array resource.
10931  * @param[in, out] dev_flow
10932  *   Pointer to the dev_flow.
10933  * @param[out] error
10934  *   Pointer to the error structure.
10935  *
10936  * @return
10937  *   0 on success, otherwise -rte_errno and rte_errno is set.
10938  */
10939 static int
10940 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
10941                          struct mlx5_flow_dv_dest_array_resource *resource,
10942                          struct mlx5_flow *dev_flow,
10943                          struct rte_flow_error *error)
10944 {
10945         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10946         struct mlx5_priv *priv = dev->data->dev_private;
10947         struct mlx5_cache_entry *entry;
10948         struct mlx5_flow_cb_ctx ctx = {
10949                 .dev = dev,
10950                 .error = error,
10951                 .data = resource,
10952         };
10953
10954         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
10955         if (!entry)
10956                 return -rte_errno;
10957         cache_resource = container_of(entry, typeof(*cache_resource), entry);
10958         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
10959         dev_flow->dv.dest_array_res = cache_resource;
10960         return 0;
10961 }
10962
10963 /**
10964  * Convert Sample action to DV specification.
10965  *
10966  * @param[in] dev
10967  *   Pointer to rte_eth_dev structure.
10968  * @param[in] action
10969  *   Pointer to sample action structure.
10970  * @param[in, out] dev_flow
10971  *   Pointer to the mlx5_flow.
10972  * @param[in] attr
10973  *   Pointer to the flow attributes.
10974  * @param[in, out] num_of_dest
10975  *   Pointer to the num of destination.
10976  * @param[in, out] sample_actions
10977  *   Pointer to sample actions list.
10978  * @param[in, out] res
10979  *   Pointer to sample resource.
10980  * @param[out] error
10981  *   Pointer to the error structure.
10982  *
10983  * @return
10984  *   0 on success, a negative errno value otherwise and rte_errno is set.
10985  */
10986 static int
10987 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
10988                                 const struct rte_flow_action_sample *action,
10989                                 struct mlx5_flow *dev_flow,
10990                                 const struct rte_flow_attr *attr,
10991                                 uint32_t *num_of_dest,
10992                                 void **sample_actions,
10993                                 struct mlx5_flow_dv_sample_resource *res,
10994                                 struct rte_flow_error *error)
10995 {
10996         struct mlx5_priv *priv = dev->data->dev_private;
10997         const struct rte_flow_action *sub_actions;
10998         struct mlx5_flow_sub_actions_list *sample_act;
10999         struct mlx5_flow_sub_actions_idx *sample_idx;
11000         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11001         struct rte_flow *flow = dev_flow->flow;
11002         struct mlx5_flow_rss_desc *rss_desc;
11003         uint64_t action_flags = 0;
11004
11005         MLX5_ASSERT(wks);
11006         rss_desc = &wks->rss_desc;
11007         sample_act = &res->sample_act;
11008         sample_idx = &res->sample_idx;
11009         res->ratio = action->ratio;
11010         sub_actions = action->actions;
11011         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11012                 int type = sub_actions->type;
11013                 uint32_t pre_rix = 0;
11014                 void *pre_r;
11015                 switch (type) {
11016                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11017                 {
11018                         const struct rte_flow_action_queue *queue;
11019                         struct mlx5_hrxq *hrxq;
11020                         uint32_t hrxq_idx;
11021
11022                         queue = sub_actions->conf;
11023                         rss_desc->queue_num = 1;
11024                         rss_desc->queue[0] = queue->index;
11025                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11026                                                     rss_desc, &hrxq_idx);
11027                         if (!hrxq)
11028                                 return rte_flow_error_set
11029                                         (error, rte_errno,
11030                                          RTE_FLOW_ERROR_TYPE_ACTION,
11031                                          NULL,
11032                                          "cannot create fate queue");
11033                         sample_act->dr_queue_action = hrxq->action;
11034                         sample_idx->rix_hrxq = hrxq_idx;
11035                         sample_actions[sample_act->actions_num++] =
11036                                                 hrxq->action;
11037                         (*num_of_dest)++;
11038                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11039                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11040                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11041                         dev_flow->handle->fate_action =
11042                                         MLX5_FLOW_FATE_QUEUE;
11043                         break;
11044                 }
11045                 case RTE_FLOW_ACTION_TYPE_RSS:
11046                 {
11047                         struct mlx5_hrxq *hrxq;
11048                         uint32_t hrxq_idx;
11049                         const struct rte_flow_action_rss *rss;
11050                         const uint8_t *rss_key;
11051
11052                         rss = sub_actions->conf;
11053                         memcpy(rss_desc->queue, rss->queue,
11054                                rss->queue_num * sizeof(uint16_t));
11055                         rss_desc->queue_num = rss->queue_num;
11056                         /* NULL RSS key indicates default RSS key. */
11057                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11058                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11059                         /*
11060                          * rss->level and rss->types should be set in advance
11061                          * when expanding items for RSS.
11062                          */
11063                         flow_dv_hashfields_set(dev_flow, rss_desc);
11064                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11065                                                     rss_desc, &hrxq_idx);
11066                         if (!hrxq)
11067                                 return rte_flow_error_set
11068                                         (error, rte_errno,
11069                                          RTE_FLOW_ERROR_TYPE_ACTION,
11070                                          NULL,
11071                                          "cannot create fate queue");
11072                         sample_act->dr_queue_action = hrxq->action;
11073                         sample_idx->rix_hrxq = hrxq_idx;
11074                         sample_actions[sample_act->actions_num++] =
11075                                                 hrxq->action;
11076                         (*num_of_dest)++;
11077                         action_flags |= MLX5_FLOW_ACTION_RSS;
11078                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11079                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11080                         dev_flow->handle->fate_action =
11081                                         MLX5_FLOW_FATE_QUEUE;
11082                         break;
11083                 }
11084                 case RTE_FLOW_ACTION_TYPE_MARK:
11085                 {
11086                         uint32_t tag_be = mlx5_flow_mark_set
11087                                 (((const struct rte_flow_action_mark *)
11088                                 (sub_actions->conf))->id);
11089
11090                         dev_flow->handle->mark = 1;
11091                         pre_rix = dev_flow->handle->dvh.rix_tag;
11092                         /* Save the mark resource before sample */
11093                         pre_r = dev_flow->dv.tag_resource;
11094                         if (flow_dv_tag_resource_register(dev, tag_be,
11095                                                   dev_flow, error))
11096                                 return -rte_errno;
11097                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11098                         sample_act->dr_tag_action =
11099                                 dev_flow->dv.tag_resource->action;
11100                         sample_idx->rix_tag =
11101                                 dev_flow->handle->dvh.rix_tag;
11102                         sample_actions[sample_act->actions_num++] =
11103                                                 sample_act->dr_tag_action;
11104                         /* Recover the mark resource after sample */
11105                         dev_flow->dv.tag_resource = pre_r;
11106                         dev_flow->handle->dvh.rix_tag = pre_rix;
11107                         action_flags |= MLX5_FLOW_ACTION_MARK;
11108                         break;
11109                 }
11110                 case RTE_FLOW_ACTION_TYPE_COUNT:
11111                 {
11112                         if (!flow->counter) {
11113                                 flow->counter =
11114                                         flow_dv_translate_create_counter(dev,
11115                                                 dev_flow, sub_actions->conf,
11116                                                 0);
11117                                 if (!flow->counter)
11118                                         return rte_flow_error_set
11119                                                 (error, rte_errno,
11120                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11121                                                 NULL,
11122                                                 "cannot create counter"
11123                                                 " object.");
11124                         }
11125                         sample_act->dr_cnt_action =
11126                                   (flow_dv_counter_get_by_idx(dev,
11127                                   flow->counter, NULL))->action;
11128                         sample_actions[sample_act->actions_num++] =
11129                                                 sample_act->dr_cnt_action;
11130                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11131                         break;
11132                 }
11133                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11134                 {
11135                         struct mlx5_flow_dv_port_id_action_resource
11136                                         port_id_resource;
11137                         uint32_t port_id = 0;
11138
11139                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11140                         /* Save the port id resource before sample */
11141                         pre_rix = dev_flow->handle->rix_port_id_action;
11142                         pre_r = dev_flow->dv.port_id_action;
11143                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11144                                                              &port_id, error))
11145                                 return -rte_errno;
11146                         port_id_resource.port_id = port_id;
11147                         if (flow_dv_port_id_action_resource_register
11148                             (dev, &port_id_resource, dev_flow, error))
11149                                 return -rte_errno;
11150                         sample_act->dr_port_id_action =
11151                                 dev_flow->dv.port_id_action->action;
11152                         sample_idx->rix_port_id_action =
11153                                 dev_flow->handle->rix_port_id_action;
11154                         sample_actions[sample_act->actions_num++] =
11155                                                 sample_act->dr_port_id_action;
11156                         /* Recover the port id resource after sample */
11157                         dev_flow->dv.port_id_action = pre_r;
11158                         dev_flow->handle->rix_port_id_action = pre_rix;
11159                         (*num_of_dest)++;
11160                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11161                         break;
11162                 }
11163                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11164                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11165                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11166                         /* Save the encap resource before sample */
11167                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11168                         pre_r = dev_flow->dv.encap_decap;
11169                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11170                                                            dev_flow,
11171                                                            attr->transfer,
11172                                                            error))
11173                                 return -rte_errno;
11174                         sample_act->dr_encap_action =
11175                                 dev_flow->dv.encap_decap->action;
11176                         sample_idx->rix_encap_decap =
11177                                 dev_flow->handle->dvh.rix_encap_decap;
11178                         sample_actions[sample_act->actions_num++] =
11179                                                 sample_act->dr_encap_action;
11180                         /* Recover the encap resource after sample */
11181                         dev_flow->dv.encap_decap = pre_r;
11182                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11183                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11184                         break;
11185                 default:
11186                         return rte_flow_error_set(error, EINVAL,
11187                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11188                                 NULL,
11189                                 "unsupported action for sampler");
11190                 }
11191         }
11192         sample_act->action_flags = action_flags;
11193         res->ft_id = dev_flow->dv.group;
11194         if (attr->transfer) {
11195                 union {
11196                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11197                         uint64_t set_action;
11198                 } action_ctx = { .set_action = 0 };
11199
11200                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11201                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11202                          MLX5_MODIFICATION_TYPE_SET);
11203                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11204                          MLX5_MODI_META_REG_C_0);
11205                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11206                          priv->vport_meta_tag);
11207                 res->set_action = action_ctx.set_action;
11208         } else if (attr->ingress) {
11209                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11210         } else {
11211                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11212         }
11213         return 0;
11214 }
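
/*
 * Editor's illustrative sketch: the public rte_flow configuration this
 * translation consumes. An application sampling one of every 1000
 * packets to queue 0 while counting them could pass (placeholder
 * values):
 */
#if 0 /* example only */
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action sub_acts[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_action_sample sample = {
		.ratio = 1000,	/* 1/ratio of the packets is sampled */
		.actions = sub_acts,
	};
#endif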
11215
11216 /**
11217  * Create and register the Sample action resources (sampler or mirror).
11218  *
11219  * @param[in] dev
11220  *   Pointer to rte_eth_dev structure.
11221  * @param[in, out] dev_flow
11222  *   Pointer to the mlx5_flow.
11223  * @param[in] num_of_dest
11224  *   The num of destination.
11225  * @param[in, out] res
11226  *   Pointer to sample resource.
11227  * @param[in, out] mdest_res
11228  *   Pointer to destination array resource.
11229  * @param[in] sample_actions
11230  *   Pointer to sample path actions list.
11231  * @param[in] action_flags
11232  *   Holds the actions detected until now.
11233  * @param[out] error
11234  *   Pointer to the error structure.
11235  *
11236  * @return
11237  *   0 on success, a negative errno value otherwise and rte_errno is set.
11238  */
11239 static int
11240 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11241                              struct mlx5_flow *dev_flow,
11242                              uint32_t num_of_dest,
11243                              struct mlx5_flow_dv_sample_resource *res,
11244                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11245                              void **sample_actions,
11246                              uint64_t action_flags,
11247                              struct rte_flow_error *error)
11248 {
11249         /* update normal path action resource into last index of array */
11250         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11251         struct mlx5_flow_sub_actions_list *sample_act =
11252                                         &mdest_res->sample_act[dest_index];
11253         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11254         struct mlx5_flow_rss_desc *rss_desc;
11255         uint32_t normal_idx = 0;
11256         struct mlx5_hrxq *hrxq;
11257         uint32_t hrxq_idx;
11258
11259         MLX5_ASSERT(wks);
11260         rss_desc = &wks->rss_desc;
11261         if (num_of_dest > 1) {
11262                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11263                         /* Handle QP action for mirroring */
11264                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11265                                                     rss_desc, &hrxq_idx);
11266                         if (!hrxq)
11267                                 return rte_flow_error_set
11268                                      (error, rte_errno,
11269                                       RTE_FLOW_ERROR_TYPE_ACTION,
11270                                       NULL,
11271                                       "cannot create rx queue");
11272                         normal_idx++;
11273                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11274                         sample_act->dr_queue_action = hrxq->action;
11275                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11276                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11277                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11278                 }
11279                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11280                         normal_idx++;
11281                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11282                                 dev_flow->handle->dvh.rix_encap_decap;
11283                         sample_act->dr_encap_action =
11284                                 dev_flow->dv.encap_decap->action;
11285                         dev_flow->handle->dvh.rix_encap_decap = 0;
11286                 }
11287                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11288                         normal_idx++;
11289                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11290                                 dev_flow->handle->rix_port_id_action;
11291                         sample_act->dr_port_id_action =
11292                                 dev_flow->dv.port_id_action->action;
11293                         dev_flow->handle->rix_port_id_action = 0;
11294                 }
11295                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11296                         normal_idx++;
11297                         mdest_res->sample_idx[dest_index].rix_jump =
11298                                 dev_flow->handle->rix_jump;
11299                         sample_act->dr_jump_action =
11300                                 dev_flow->dv.jump->action;
11301                         dev_flow->handle->rix_jump = 0;
11302                 }
11303                 sample_act->actions_num = normal_idx;
11304                 /* update sample action resource into first index of array */
11305                 mdest_res->ft_type = res->ft_type;
11306                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11307                                 sizeof(struct mlx5_flow_sub_actions_idx));
11308                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11309                                 sizeof(struct mlx5_flow_sub_actions_list));
11310                 mdest_res->num_of_dest = num_of_dest;
11311                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11312                                                          dev_flow, error))
11313                         return rte_flow_error_set(error, EINVAL,
11314                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11315                                                   NULL, "can't create sample "
11316                                                   "action");
11317         } else {
11318                 res->sub_actions = sample_actions;
11319                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11320                         return rte_flow_error_set(error, EINVAL,
11321                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11322                                                   NULL,
11323                                                   "can't create sample action");
11324         }
11325         return 0;
11326 }
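
/*
 * Editor's note: the helper above picks one of two DR objects. With a
 * single destination it registers a sampler (ratio N, sampled copies
 * steered to the sub-action list); with several destinations it
 * registers a dest array, i.e. true mirroring, where the normal path
 * occupies the last slot. Worked slot layout for num_of_dest == 2:
 *
 *   sample_idx[0] / sample_act[0]                    - mirrored destination
 *   sample_idx[MLX5_MAX_DEST_NUM - 1] (= dest_index) - normal path
 */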
11327
11328 /**
11329  * Remove an ASO age action from age actions list.
11330  *
11331  * @param[in] dev
11332  *   Pointer to the Ethernet device structure.
11333  * @param[in] age
11334  *   Pointer to the aso age action handler.
11335  */
11336 static void
11337 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11338                                 struct mlx5_aso_age_action *age)
11339 {
11340         struct mlx5_age_info *age_info;
11341         struct mlx5_age_param *age_param = &age->age_params;
11342         struct mlx5_priv *priv = dev->data->dev_private;
11343         uint16_t expected = AGE_CANDIDATE;
11344
11345         age_info = GET_PORT_AGE_INFO(priv);
11346         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11347                                          AGE_FREE, false, __ATOMIC_RELAXED,
11348                                          __ATOMIC_RELAXED)) {
11349                 /*
11350                  * We need the lock even if it is an age timeout,
11351                  * since the age action may still be in process.
11352                  */
11353                 rte_spinlock_lock(&age_info->aged_sl);
11354                 LIST_REMOVE(age, next);
11355                 rte_spinlock_unlock(&age_info->aged_sl);
11356                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11357         }
11358 }
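
/*
 * Editor's note, a sketch of the state transition the compare-exchange
 * above implements: AGE_CANDIDATE moves straight to AGE_FREE without a
 * lock; any other state means the action was already reported on the
 * aged list and must be unlinked under aged_sl first.
 */
#if 0 /* example only */
	uint16_t expected = AGE_CANDIDATE;

	if (__atomic_compare_exchange_n(&age_param->state, &expected,
					AGE_FREE, false, __ATOMIC_RELAXED,
					__ATOMIC_RELAXED))
		return; /* never reported as aged, nothing to unlink */
	/* On failure "expected" holds the actual state; take the lock. */
#endif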
11359
11360 /**
11361  * Release an ASO age action.
11362  *
11363  * @param[in] dev
11364  *   Pointer to the Ethernet device structure.
11365  * @param[in] age_idx
11366  *   Index of ASO age action to release.
11370  *
11371  * @return
11372  *   0 when age action was removed, otherwise the number of references.
11373  */
11374 static int
11375 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11376 {
11377         struct mlx5_priv *priv = dev->data->dev_private;
11378         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11379         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11380         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11381
11382         if (!ret) {
11383                 flow_dv_aso_age_remove_from_age(dev, age);
11384                 rte_spinlock_lock(&mng->free_sl);
11385                 LIST_INSERT_HEAD(&mng->free, age, next);
11386                 rte_spinlock_unlock(&mng->free_sl);
11387         }
11388         return ret;
11389 }
11390
11391 /**
11392  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11393  *
11394  * @param[in] dev
11395  *   Pointer to the Ethernet device structure.
11396  *
11397  * @return
11398  *   0 on success, otherwise negative errno value and rte_errno is set.
11399  */
11400 static int
11401 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11402 {
11403         struct mlx5_priv *priv = dev->data->dev_private;
11404         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11405         void *old_pools = mng->pools;
11406         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11407         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11408         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11409
11410         if (!pools) {
11411                 rte_errno = ENOMEM;
11412                 return -ENOMEM;
11413         }
11414         if (old_pools) {
11415                 memcpy(pools, old_pools,
11416                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11417                 mlx5_free(old_pools);
11418         } else {
11419                 /* First ASO flow hit allocation - starting ASO data-path. */
11420                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11421
11422                 if (ret) {
11423                         mlx5_free(pools);
11424                         return ret;
11425                 }
11426         }
11427         mng->n = resize;
11428         mng->pools = pools;
11429         return 0;
11430 }
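
/*
 * Editor's note: only the array of pool pointers is reallocated here;
 * the pools themselves never move, so mlx5_aso_age_action pointers
 * handed out earlier stay valid across a resize. Concurrent pool
 * creation is serialized by resize_sl, taken in
 * flow_dv_age_pool_create() around the call to this helper.
 */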
11431
11432 /**
11433  * Create and initialize a new ASO aging pool.
11434  *
11435  * @param[in] dev
11436  *   Pointer to the Ethernet device structure.
11437  * @param[out] age_free
11438  *   Where to put the pointer of a new age action.
11439  *
11440  * @return
11441  *   The age actions pool pointer and @p age_free is set on success,
11442  *   NULL otherwise and rte_errno is set.
11443  */
11444 static struct mlx5_aso_age_pool *
11445 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11446                         struct mlx5_aso_age_action **age_free)
11447 {
11448         struct mlx5_priv *priv = dev->data->dev_private;
11449         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11450         struct mlx5_aso_age_pool *pool = NULL;
11451         struct mlx5_devx_obj *obj = NULL;
11452         uint32_t i;
11453
11454         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11455                                                     priv->sh->pdn);
11456         if (!obj) {
11457                 rte_errno = ENODATA;
11458                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11459                 return NULL;
11460         }
11461         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11462         if (!pool) {
11463                 claim_zero(mlx5_devx_cmd_destroy(obj));
11464                 rte_errno = ENOMEM;
11465                 return NULL;
11466         }
11467         pool->flow_hit_aso_obj = obj;
11468         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11469         rte_spinlock_lock(&mng->resize_sl);
11470         pool->index = mng->next;
11471         /* Resize pools array if there is no room for the new pool in it. */
11472         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11473                 claim_zero(mlx5_devx_cmd_destroy(obj));
11474                 mlx5_free(pool);
11475                 rte_spinlock_unlock(&mng->resize_sl);
11476                 return NULL;
11477         }
11478         mng->pools[pool->index] = pool;
11479         mng->next++;
11480         rte_spinlock_unlock(&mng->resize_sl);
11481         /* Assign the first action in the new pool, the rest go to free list. */
11482         *age_free = &pool->actions[0];
11483         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11484                 pool->actions[i].offset = i;
11485                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11486         }
11487         return pool;
11488 }
11489
11490 /**
11491  * Allocate an ASO aging bit.
11492  *
11493  * @param[in] dev
11494  *   Pointer to the Ethernet device structure.
11495  * @param[out] error
11496  *   Pointer to the error structure.
11497  *
11498  * @return
11499  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11500  */
11501 static uint32_t
11502 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11503 {
11504         struct mlx5_priv *priv = dev->data->dev_private;
11505         const struct mlx5_aso_age_pool *pool;
11506         struct mlx5_aso_age_action *age_free = NULL;
11507         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11508
11509         MLX5_ASSERT(mng);
11510         /* Try to get the next free age action bit. */
11511         rte_spinlock_lock(&mng->free_sl);
11512         age_free = LIST_FIRST(&mng->free);
11513         if (age_free) {
11514                 LIST_REMOVE(age_free, next);
11515         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11516                 rte_spinlock_unlock(&mng->free_sl);
11517                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11518                                    NULL, "failed to create ASO age pool");
11519                 return 0; /* 0 is an error. */
11520         }
11521         rte_spinlock_unlock(&mng->free_sl);
11522         pool = container_of
11523           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11524                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11525                                                                        actions);
11526         if (!age_free->dr_action) {
11527                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11528                                                  error);
11529
11530                 if (reg_c < 0) {
11531                         rte_flow_error_set(error, rte_errno,
11532                                            RTE_FLOW_ERROR_TYPE_ACTION,
11533                                            NULL, "failed to get reg_c "
11534                                            "for ASO flow hit");
11535                         return 0; /* 0 is an error. */
11536                 }
11537 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11538                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11539                                 (priv->sh->rx_domain,
11540                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11541                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11542                                  (reg_c - REG_C_0));
11543 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11544                 if (!age_free->dr_action) {
11545                         rte_errno = errno;
11546                         rte_spinlock_lock(&mng->free_sl);
11547                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11548                         rte_spinlock_unlock(&mng->free_sl);
11549                         rte_flow_error_set(error, rte_errno,
11550                                            RTE_FLOW_ERROR_TYPE_ACTION,
11551                                            NULL, "failed to create ASO "
11552                                            "flow hit action");
11553                         return 0; /* 0 is an error. */
11554                 }
11555         }
11556         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11557         return pool->index | ((age_free->offset + 1) << 16);
11558 }
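
/*
 * Editor's worked example of the returned index encoding: the low 16
 * bits carry the pool index, the high 16 bits carry offset + 1, so 0
 * stays reserved as the error value. For pool->index == 3 and
 * age_free->offset == 5:
 *
 *   3 | ((5 + 1) << 16) == 0x00060003
 *
 * The +1 bias is undone again when the action is looked up by index.
 */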
11559
11560 /**
11561  * Initialize flow ASO age parameters.
11562  *
11563  * @param[in] dev
11564  *   Pointer to rte_eth_dev structure.
11565  * @param[in] age_idx
11566  *   Index of ASO age action.
11567  * @param[in] context
11568  *   Pointer to flow counter age context.
11569  * @param[in] timeout
11570  *   Aging timeout in seconds.
11571  *
11572  */
11573 static void
11574 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11575                             uint32_t age_idx,
11576                             void *context,
11577                             uint32_t timeout)
11578 {
11579         struct mlx5_aso_age_action *aso_age;
11580
11581         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11582         MLX5_ASSERT(aso_age);
11583         aso_age->age_params.context = context;
11584         aso_age->age_params.timeout = timeout;
11585         aso_age->age_params.port_id = dev->data->port_id;
11586         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11587                          __ATOMIC_RELAXED);
11588         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11589                          __ATOMIC_RELAXED);
11590 }
11591
11592 static void
11593 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11594                                const struct rte_flow_item_integrity *value,
11595                                void *headers_m, void *headers_v)
11596 {
11597         if (mask->l4_ok) {
11598                 /* The application l4_ok filter aggregates all hw l4 filters,
11599                  * therefore hw l4_checksum_ok must be implicitly added here.
11600                  */
11601                 struct rte_flow_item_integrity local_item;
11602
11603                 local_item.l4_csum_ok = 1;
11604                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11605                          local_item.l4_csum_ok);
11606                 if (value->l4_ok) {
11607                         /* An application l4_ok = 1 match sets both hw
11608                          * flags, l4_ok and l4_checksum_ok, to 1.
11609                          */
11610                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11611                                  l4_checksum_ok, local_item.l4_csum_ok);
11612                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11613                                  mask->l4_ok);
11614                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11615                                  value->l4_ok);
11616                 } else {
11617                         /* An application l4_ok = 0 matches on hw flag
11618                          * l4_checksum_ok = 0 only.
11619                          */
11620                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11621                                  l4_checksum_ok, 0);
11622                 }
11623         } else if (mask->l4_csum_ok) {
11624                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11625                          mask->l4_csum_ok);
11626                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
11627                          value->l4_csum_ok);
11628         }
11629 }
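
/*
 * Editor's note, the resulting match semantics as a truth table
 * (m = mask bit, v = value bit requested by the application):
 *
 *   m.l4_ok  v.l4_ok | hw l4_ok match | hw l4_checksum_ok match
 *   -------  ------- | -------------- | -----------------------
 *      1        1    |      = 1       |           = 1
 *      1        0    |  not matched   |           = 0
 *      0        -    |  not matched   | per l4_csum_ok mask/value
 */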
11630
11631 static void
11632 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
11633                                const struct rte_flow_item_integrity *value,
11634                                void *headers_m, void *headers_v,
11635                                bool is_ipv4)
11636 {
11637         if (mask->l3_ok) {
11638                 /* The application l3_ok filter aggregates all hw l3 filters,
11639                  * therefore hw ipv4_checksum_ok must be implicitly added here.
11640                  */
11641                 struct rte_flow_item_integrity local_item;
11642
11643                 local_item.ipv4_csum_ok = !!is_ipv4;
11644                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11645                          local_item.ipv4_csum_ok);
11646                 if (value->l3_ok) {
11647                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11648                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
11649                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
11650                                  mask->l3_ok);
11651                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
11652                                  value->l3_ok);
11653                 } else {
11654                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11655                                  ipv4_checksum_ok, 0);
11656                 }
11657         } else if (mask->ipv4_csum_ok) {
11658                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11659                          mask->ipv4_csum_ok);
11660                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11661                          value->ipv4_csum_ok);
11662         }
11663 }
11664
11665 static void
11666 flow_dv_translate_item_integrity(void *matcher, void *key,
11667                                  const struct rte_flow_item *head_item,
11668                                  const struct rte_flow_item *integrity_item)
11669 {
11670         const struct rte_flow_item_integrity *mask = integrity_item->mask;
11671         const struct rte_flow_item_integrity *value = integrity_item->spec;
11672         const struct rte_flow_item *tunnel_item, *end_item, *item;
11673         void *headers_m;
11674         void *headers_v;
11675         uint32_t l3_protocol;
11676
11677         if (!value)
11678                 return;
11679         if (!mask)
11680                 mask = &rte_flow_item_integrity_mask;
11681         if (value->level > 1) {
11682                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11683                                          inner_headers);
11684                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
11685         } else {
11686                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11687                                          outer_headers);
11688                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
11689         }
11690         tunnel_item = mlx5_flow_find_tunnel_item(head_item);
11691         if (value->level > 1) {
11692                 /* tunnel item was verified during the item validation */
11693                 item = tunnel_item;
11694                 end_item = mlx5_find_end_item(tunnel_item);
11695         } else {
11696                 item = head_item;
11697                 end_item = tunnel_item ? tunnel_item :
11698                            mlx5_find_end_item(integrity_item);
11699         }
11700         l3_protocol = mask->l3_ok ?
11701                       mlx5_flow_locate_proto_l3(&item, end_item) : 0;
11702         flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
11703                                        l3_protocol == RTE_ETHER_TYPE_IPV4);
11704         flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
11705 }
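
/*
 * Editor's illustrative sketch: the public rte_flow item this
 * translation consumes. Matching tunneled (level > 1) packets whose
 * inner L3 and L4 parts are valid could look like this (placeholder
 * values):
 */
#if 0 /* example only */
	struct rte_flow_item_integrity spec = {
		.level = 2,	/* inner headers of a tunneled packet */
		.l3_ok = 1,
		.l4_ok = 1,
	};
	struct rte_flow_item_integrity mask = {
		.l3_ok = 1,
		.l4_ok = 1,
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
		.spec = &spec,
		.mask = &mask,
	};
#endif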
11706
11707 /**
11708  * Prepares DV flow counter with aging configuration.
11709  * Gets it by index when exists, creates a new one when doesn't.
11710  *
11711  * @param[in] dev
11712  *   Pointer to rte_eth_dev structure.
11713  * @param[in] dev_flow
11714  *   Pointer to the mlx5_flow.
11715  * @param[in, out] flow
11716  *   Pointer to the sub flow.
11717  * @param[in] count
11718  *   Pointer to the counter action configuration.
11719  * @param[in] age
11720  *   Pointer to the aging action configuration.
11721  * @param[out] error
11722  *   Pointer to the error structure.
11723  *
11724  * @return
11725  *   Pointer to the counter, NULL otherwise.
11726  */
11727 static struct mlx5_flow_counter *
11728 flow_dv_prepare_counter(struct rte_eth_dev *dev,
11729                         struct mlx5_flow *dev_flow,
11730                         struct rte_flow *flow,
11731                         const struct rte_flow_action_count *count,
11732                         const struct rte_flow_action_age *age,
11733                         struct rte_flow_error *error)
11734 {
11735         if (!flow->counter) {
11736                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
11737                                                                  count, age);
11738                 if (!flow->counter) {
11739                         rte_flow_error_set(error, rte_errno,
11740                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11741                                            "cannot create counter object.");
11742                         return NULL;
11743                 }
11744         }
11745         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11746 }
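
/*
 * Editor's illustrative sketch: pairing a counter with aging through
 * the helper above. A flow counted and aged out after 10 seconds of
 * inactivity (placeholder values; my_flow_cookie is a hypothetical
 * user context, the counter configuration may be NULL):
 */
#if 0 /* example only */
	struct rte_flow_action_age age = {
		.timeout = 10,			/* seconds */
		.context = my_flow_cookie,	/* hypothetical cookie */
	};
	struct mlx5_flow_counter *cnt =
		flow_dv_prepare_counter(dev, dev_flow, flow, NULL, &age,
					error);

	if (!cnt)
		return -rte_errno;
#endif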
11747
11748 /*
11749  * Release an ASO CT action on the device that owns it.
11750  *
11751  * @param[in] dev
11752  *   Pointer to the Ethernet device structure.
11753  * @param[in] idx
11754  *   Index of ASO CT action to release.
11755  *
11756  * @return
11757  *   0 when CT action was removed, otherwise the number of references.
11758  */
11759 static inline int
11760 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
11761 {
11762         struct mlx5_priv *priv = dev->data->dev_private;
11763         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11764         uint32_t ret;
11765         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
11766         enum mlx5_aso_ct_state state =
11767                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
11768
11769         /* Cannot release when CT is in the ASO SQ. */
11770         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
11771                 return -1;
11772         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
11773         if (!ret) {
11774                 if (ct->dr_action_orig) {
11775 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11776                         claim_zero(mlx5_glue->destroy_flow_action
11777                                         (ct->dr_action_orig));
11778 #endif
11779                         ct->dr_action_orig = NULL;
11780                 }
11781                 if (ct->dr_action_rply) {
11782 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11783                         claim_zero(mlx5_glue->destroy_flow_action
11784                                         (ct->dr_action_rply));
11785 #endif
11786                         ct->dr_action_rply = NULL;
11787                 }
11788                 /* Clear the state to free; not needed on the 1st allocation. */
11789                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
11790                 rte_spinlock_lock(&mng->ct_sl);
11791                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
11792                 rte_spinlock_unlock(&mng->ct_sl);
11793         }
11794         return (int)ret;
11795 }
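
/*
 * Editor's note: the -1 above is a "busy" indication, distinct from
 * the positive reference counts. While the CT context sits in the ASO
 * SQ (WAIT) or a query is in flight (QUERY), the hardware completion
 * still references the object, so it cannot be recycled yet and the
 * caller is expected to retry the release later.
 */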
11796
11797 static inline int
11798 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
11799 {
11800         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
11801         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
11802         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
11804
11805         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
11806         if (dev->data->dev_started != 1)
11807                 return -1;
11808         return flow_dv_aso_ct_dev_release(owndev, idx);
11809 }
11810
11811 /*
11812  * Resize the ASO CT pools array by 64 pools.
11813  *
11814  * @param[in] dev
11815  *   Pointer to the Ethernet device structure.
11816  *
11817  * @return
11818  *   0 on success, otherwise negative errno value and rte_errno is set.
11819  */
11820 static int
11821 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
11822 {
11823         struct mlx5_priv *priv = dev->data->dev_private;
11824         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11825         void *old_pools = mng->pools;
11826         /* Magic number for now; needs a macro. */
11827         uint32_t resize = mng->n + 64;
11828         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
11829         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11830
11831         if (!pools) {
11832                 rte_errno = ENOMEM;
11833                 return -rte_errno;
11834         }
11835         rte_rwlock_write_lock(&mng->resize_rwl);
11836         /* ASO SQ/QP was already initialized during startup. */
11837         if (old_pools) {
11838                 /* Realloc could be an alternative choice. */
11839                 rte_memcpy(pools, old_pools,
11840                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
11841                 mlx5_free(old_pools);
11842         }
11843         mng->n = resize;
11844         mng->pools = pools;
11845         rte_rwlock_write_unlock(&mng->resize_rwl);
11846         return 0;
11847 }
11848
11849 /*
11850  * Create and initialize a new ASO CT pool.
11851  *
11852  * @param[in] dev
11853  *   Pointer to the Ethernet device structure.
11854  * @param[out] ct_free
11855  *   Where to put the pointer of a new CT action.
11856  *
11857  * @return
11858  *   The CT actions pool pointer and @p ct_free is set on success,
11859  *   NULL otherwise and rte_errno is set.
11860  */
11861 static struct mlx5_aso_ct_pool *
11862 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
11863                        struct mlx5_aso_ct_action **ct_free)
11864 {
11865         struct mlx5_priv *priv = dev->data->dev_private;
11866         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11867         struct mlx5_aso_ct_pool *pool = NULL;
11868         struct mlx5_devx_obj *obj = NULL;
11869         uint32_t i;
11870         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
11871
11872         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
11873                                                 priv->sh->pdn, log_obj_size);
11874         if (!obj) {
11875                 rte_errno = ENODATA;
11876                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
11877                 return NULL;
11878         }
11879         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11880         if (!pool) {
11881                 rte_errno = ENOMEM;
11882                 claim_zero(mlx5_devx_cmd_destroy(obj));
11883                 return NULL;
11884         }
11885         pool->devx_obj = obj;
11886         pool->index = mng->next;
11887         /* Resize pools array if there is no room for the new pool in it. */
11888         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
11889                 claim_zero(mlx5_devx_cmd_destroy(obj));
11890                 mlx5_free(pool);
11891                 return NULL;
11892         }
11893         mng->pools[pool->index] = pool;
11894         mng->next++;
11895         /* Assign the first action in the new pool, the rest go to the free list. */
11896         *ct_free = &pool->actions[0];
11897         /* The caller holds the lock, so the list operations are safe here. */
11898         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
11899                 /* refcnt is zeroed by the MLX5_MEM_ZERO allocation. */
11900                 pool->actions[i].offset = i;
11901                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
11902         }
11903         return pool;
11904 }
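
/*
 * Illustrative sketch with hypothetical names: the free-list seeding done by
 * the pool creation above, reduced to its essentials with <sys/queue.h>.
 * Action 0 of a fresh pool is handed straight to the caller; the remaining
 * actions are pushed onto the shared free list.
 */
struct example_ct_act {
        LIST_ENTRY(example_ct_act) next; /* Free-list linkage. */
        uint16_t offset;                 /* Index inside the owning pool. */
};
LIST_HEAD(example_ct_act_list, example_ct_act);

static inline void
example_ct_pool_seed(struct example_ct_act_list *free_list,
                     struct example_ct_act *acts, unsigned int n)
{
        unsigned int i;

        /* acts[0] is returned to the caller; seed the list with the rest. */
        for (i = 1; i < n; i++) {
                acts[i].offset = i;
                LIST_INSERT_HEAD(free_list, &acts[i], next);
        }
}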
11905
11906 /**
11907  * Allocate an ASO CT action from the free list.
11908  *
11909  * @param[in] dev
11910  *   Pointer to the Ethernet device structure.
11911  * @param[out] error
11912  *   Pointer to the error structure.
11913  *
11914  * @return
11915  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
11916  */
11917 static uint32_t
11918 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11919 {
11920         struct mlx5_priv *priv = dev->data->dev_private;
11921         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
11922         struct mlx5_aso_ct_action *ct = NULL;
11923         struct mlx5_aso_ct_pool *pool;
11924         uint8_t reg_c;
11925         uint32_t ct_idx;
11926
11927         MLX5_ASSERT(mng);
11928         if (!priv->config.devx) {
11929                 rte_errno = ENOTSUP;
11930                 return 0;
11931         }
11932         /* Get a free CT action; if none, a new pool will be created. */
11933         rte_spinlock_lock(&mng->ct_sl);
11934         ct = LIST_FIRST(&mng->free_cts);
11935         if (ct) {
11936                 LIST_REMOVE(ct, next);
11937         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
11938                 rte_spinlock_unlock(&mng->ct_sl);
11939                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11940                                    NULL, "failed to create ASO CT pool");
11941                 return 0;
11942         }
11943         rte_spinlock_unlock(&mng->ct_sl);
11944         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
11945         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
11946         /* 0: inactive, 1: created, 2+: used by flows. */
11947         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
11948         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
11949         if (!ct->dr_action_orig) {
11950 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11951                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
11952                         (priv->sh->rx_domain, pool->devx_obj->obj,
11953                          ct->offset,
11954                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
11955                          reg_c - REG_C_0);
11956 #else
11957                 RTE_SET_USED(reg_c);
11958 #endif
11959                 if (!ct->dr_action_orig) {
11960                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11961                         rte_flow_error_set(error, rte_errno,
11962                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11963                                            "failed to create ASO CT action");
11964                         return 0;
11965                 }
11966         }
11967         if (!ct->dr_action_rply) {
11968 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
11969                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
11970                         (priv->sh->rx_domain, pool->devx_obj->obj,
11971                          ct->offset,
11972                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
11973                          reg_c - REG_C_0);
11974 #endif
11975                 if (!ct->dr_action_rply) {
11976                         flow_dv_aso_ct_dev_release(dev, ct_idx);
11977                         rte_flow_error_set(error, rte_errno,
11978                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11979                                            "failed to create ASO CT action");
11980                         return 0;
11981                 }
11982         }
11983         return ct_idx;
11984 }
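
/*
 * Illustrative sketch (reusing struct example_ct_act from the sketch above):
 * how the allocator recovers the owning pool from a CT action pointer. Each
 * action only stores its offset within the pool, so container_of() over the
 * indexed array member yields the pool base without a back-pointer. The
 * structure below is hypothetical.
 */
struct example_ct_pool {
        uint32_t index;
        struct example_ct_act acts[8]; /* Hypothetical pool size. */
};

static inline struct example_ct_pool *
example_ct_act_to_pool(struct example_ct_act *act)
{
        /* Subtract the offset of acts[act->offset] from the action address. */
        return container_of(act, struct example_ct_pool, acts[act->offset]);
}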
11985
11986 /**
11987  * Create a conntrack object with context and actions by using the ASO mechanism.
11988  *
11989  * @param[in] dev
11990  *   Pointer to rte_eth_dev structure.
11991  * @param[in] pro
11992  *   Pointer to the conntrack information profile.
11993  * @param[out] error
11994  *   Pointer to the error structure.
11995  *
11996  * @return
11997  *   Index to conntrack object on success, 0 otherwise.
11998  */
11999 static uint32_t
12000 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12001                                    const struct rte_flow_action_conntrack *pro,
12002                                    struct rte_flow_error *error)
12003 {
12004         struct mlx5_priv *priv = dev->data->dev_private;
12005         struct mlx5_dev_ctx_shared *sh = priv->sh;
12006         struct mlx5_aso_ct_action *ct;
12007         uint32_t idx;
12008
12009         if (!sh->ct_aso_en)
12010                 return rte_flow_error_set(error, ENOTSUP,
12011                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12012                                           "Connection tracking is not supported");
12013         idx = flow_dv_aso_ct_alloc(dev, error);
12014         if (!idx)
12015                 return rte_flow_error_set(error, rte_errno,
12016                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12017                                           "Failed to allocate CT object");
12018         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12019         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12020                 return rte_flow_error_set(error, EBUSY,
12021                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12022                                           "Failed to update CT");
12023         ct->is_original = !!pro->is_original_dir;
12024         ct->peer = pro->peer_port;
12025         return idx;
12026 }
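
/*
 * Illustrative sketch of the application side, not driver code: creating a
 * conntrack object through the public rte_flow indirect-action API, which
 * eventually lands in flow_dv_translate_create_conntrack() above. Only a few
 * profile fields are shown and error handling is minimal; the full TCP
 * profile (windows, sequence numbers, per-direction parameters) is omitted.
 */
static inline struct rte_flow_action_handle *
example_ct_handle_create(uint16_t port_id, uint16_t peer_port)
{
        const struct rte_flow_indir_action_conf conf = {
                .ingress = 1,
        };
        const struct rte_flow_action_conntrack profile = {
                .peer_port = peer_port,
                .is_original_dir = 1,
                .enable = 1,
                /* Assumed initial state for this example. */
                .state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
                .conf = &profile,
        };
        struct rte_flow_error err;

        return rte_flow_action_handle_create(port_id, &conf, &action, &err);
}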
12027
12028 /**
12029  * Fill the flow with DV spec, without locking
12030  * (the mutex must be acquired by the caller).
12031  *
12032  * @param[in] dev
12033  *   Pointer to rte_eth_dev structure.
12034  * @param[in, out] dev_flow
12035  *   Pointer to the sub flow.
12036  * @param[in] attr
12037  *   Pointer to the flow attributes.
12038  * @param[in] items
12039  *   Pointer to the list of items.
12040  * @param[in] actions
12041  *   Pointer to the list of actions.
12042  * @param[out] error
12043  *   Pointer to the error structure.
12044  *
12045  * @return
12046  *   0 on success, a negative errno value otherwise and rte_errno is set.
12047  */
12048 static int
12049 flow_dv_translate(struct rte_eth_dev *dev,
12050                   struct mlx5_flow *dev_flow,
12051                   const struct rte_flow_attr *attr,
12052                   const struct rte_flow_item items[],
12053                   const struct rte_flow_action actions[],
12054                   struct rte_flow_error *error)
12055 {
12056         struct mlx5_priv *priv = dev->data->dev_private;
12057         struct mlx5_dev_config *dev_conf = &priv->config;
12058         struct rte_flow *flow = dev_flow->flow;
12059         struct mlx5_flow_handle *handle = dev_flow->handle;
12060         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12061         struct mlx5_flow_rss_desc *rss_desc;
12062         uint64_t item_flags = 0;
12063         uint64_t last_item = 0;
12064         uint64_t action_flags = 0;
12065         struct mlx5_flow_dv_matcher matcher = {
12066                 .mask = {
12067                         .size = sizeof(matcher.mask.buf) -
12068                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
12069                 },
12070         };
12071         int actions_n = 0;
12072         bool actions_end = false;
12073         union {
12074                 struct mlx5_flow_dv_modify_hdr_resource res;
12075                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12076                             sizeof(struct mlx5_modification_cmd) *
12077                             (MLX5_MAX_MODIFY_NUM + 1)];
12078         } mhdr_dummy;
12079         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12080         const struct rte_flow_action_count *count = NULL;
12081         const struct rte_flow_action_age *non_shared_age = NULL;
12082         union flow_dv_attr flow_attr = { .attr = 0 };
12083         uint32_t tag_be;
12084         union mlx5_flow_tbl_key tbl_key;
12085         uint32_t modify_action_position = UINT32_MAX;
12086         void *match_mask = matcher.mask.buf;
12087         void *match_value = dev_flow->dv.value.buf;
12088         uint8_t next_protocol = 0xff;
12089         struct rte_vlan_hdr vlan = { 0 };
12090         struct mlx5_flow_dv_dest_array_resource mdest_res;
12091         struct mlx5_flow_dv_sample_resource sample_res;
12092         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12093         const struct rte_flow_action_sample *sample = NULL;
12094         struct mlx5_flow_sub_actions_list *sample_act;
12095         uint32_t sample_act_pos = UINT32_MAX;
12096         uint32_t age_act_pos = UINT32_MAX;
12097         uint32_t num_of_dest = 0;
12098         int tmp_actions_n = 0;
12099         uint32_t table;
12100         int ret = 0;
12101         const struct mlx5_flow_tunnel *tunnel = NULL;
12102         struct flow_grp_info grp_info = {
12103                 .external = !!dev_flow->external,
12104                 .transfer = !!attr->transfer,
12105                 .fdb_def_rule = !!priv->fdb_def_rule,
12106                 .skip_scale = dev_flow->skip_scale &
12107                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12108                 .std_tbl_fix = true,
12109         };
12110         const struct rte_flow_item *head_item = items;
12111
12112         if (!wks)
12113                 return rte_flow_error_set(error, ENOMEM,
12114                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12115                                           NULL,
12116                                           "failed to push flow workspace");
12117         rss_desc = &wks->rss_desc;
12118         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12119         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12120         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12121                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12122         /* Update the normal path action resource at the last index of the array. */
12123         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12124         if (is_tunnel_offload_active(dev)) {
12125                 if (dev_flow->tunnel) {
12126                         RTE_VERIFY(dev_flow->tof_type ==
12127                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12128                         tunnel = dev_flow->tunnel;
12129                 } else {
12130                         tunnel = mlx5_get_tof(items, actions,
12131                                               &dev_flow->tof_type);
12132                         dev_flow->tunnel = tunnel;
12133                 }
12134                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12135                                         (dev, attr, tunnel, dev_flow->tof_type);
12136         }
12139         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12140                                        &grp_info, error);
12141         if (ret)
12142                 return ret;
12143         dev_flow->dv.group = table;
12144         if (attr->transfer)
12145                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12146         /* The number of actions must be reset to 0 in case of a dirty stack. */
12147         mhdr_res->actions_num = 0;
12148         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12149                 /*
12150                  * Do not add a decap action if the match rule drops the
12151                  * packet: HW rejects rules combining decap and drop.
12152                  *
12153                  * If a tunnel match rule was inserted before the matching
12154                  * tunnel-set rule, the flow table used in the match rule
12155                  * must be registered. The current implementation handles
12156                  * that in flow_dv_match_register() at the function end.
12157                  */
12158                 bool add_decap = true;
12159                 const struct rte_flow_action *ptr = actions;
12160
12161                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12162                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12163                                 add_decap = false;
12164                                 break;
12165                         }
12166                 }
12167                 if (add_decap) {
12168                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12169                                                            attr->transfer,
12170                                                            error))
12171                                 return -rte_errno;
12172                         dev_flow->dv.actions[actions_n++] =
12173                                         dev_flow->dv.encap_decap->action;
12174                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12175                 }
12176         }
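        /*
         * Translate each action into a DV action: every supported type
         * appends to dev_flow->dv.actions[] and/or accumulates a
         * MLX5_FLOW_ACTION_* flag; modify-header commands are gathered in
         * mhdr_res and registered once at RTE_FLOW_ACTION_TYPE_END.
         */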
12177         for (; !actions_end ; actions++) {
12178                 const struct rte_flow_action_queue *queue;
12179                 const struct rte_flow_action_rss *rss;
12180                 const struct rte_flow_action *action = actions;
12181                 const uint8_t *rss_key;
12182                 struct mlx5_flow_tbl_resource *tbl;
12183                 struct mlx5_aso_age_action *age_act;
12184                 struct mlx5_flow_counter *cnt_act;
12185                 uint32_t port_id = 0;
12186                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12187                 int action_type = actions->type;
12188                 const struct rte_flow_action *found_action = NULL;
12189                 uint32_t jump_group = 0;
12190                 uint32_t owner_idx;
12191                 struct mlx5_aso_ct_action *ct;
12192
12193                 if (!mlx5_flow_os_action_supported(action_type))
12194                         return rte_flow_error_set(error, ENOTSUP,
12195                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12196                                                   actions,
12197                                                   "action not supported");
12198                 switch (action_type) {
12199                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12200                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12201                         break;
12202                 case RTE_FLOW_ACTION_TYPE_VOID:
12203                         break;
12204                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12205                         if (flow_dv_translate_action_port_id(dev, action,
12206                                                              &port_id, error))
12207                                 return -rte_errno;
12208                         port_id_resource.port_id = port_id;
12209                         MLX5_ASSERT(!handle->rix_port_id_action);
12210                         if (flow_dv_port_id_action_resource_register
12211                             (dev, &port_id_resource, dev_flow, error))
12212                                 return -rte_errno;
12213                         dev_flow->dv.actions[actions_n++] =
12214                                         dev_flow->dv.port_id_action->action;
12215                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12216                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12217                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12218                         num_of_dest++;
12219                         break;
12220                 case RTE_FLOW_ACTION_TYPE_FLAG:
12221                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12222                         dev_flow->handle->mark = 1;
12223                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12224                                 struct rte_flow_action_mark mark = {
12225                                         .id = MLX5_FLOW_MARK_DEFAULT,
12226                                 };
12227
12228                                 if (flow_dv_convert_action_mark(dev, &mark,
12229                                                                 mhdr_res,
12230                                                                 error))
12231                                         return -rte_errno;
12232                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12233                                 break;
12234                         }
12235                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12236                         /*
12237                          * Only one FLAG or MARK is supported per device flow
12238                          * right now. So the pointer to the tag resource must be
12239                          * zero before the register process.
12240                          */
12241                         MLX5_ASSERT(!handle->dvh.rix_tag);
12242                         if (flow_dv_tag_resource_register(dev, tag_be,
12243                                                           dev_flow, error))
12244                                 return -rte_errno;
12245                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12246                         dev_flow->dv.actions[actions_n++] =
12247                                         dev_flow->dv.tag_resource->action;
12248                         break;
12249                 case RTE_FLOW_ACTION_TYPE_MARK:
12250                         action_flags |= MLX5_FLOW_ACTION_MARK;
12251                         dev_flow->handle->mark = 1;
12252                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12253                                 const struct rte_flow_action_mark *mark =
12254                                         (const struct rte_flow_action_mark *)
12255                                                 actions->conf;
12256
12257                                 if (flow_dv_convert_action_mark(dev, mark,
12258                                                                 mhdr_res,
12259                                                                 error))
12260                                         return -rte_errno;
12261                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12262                                 break;
12263                         }
12264                         /* Fall-through */
12265                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12266                         /* Legacy (non-extensive) MARK action. */
12267                         tag_be = mlx5_flow_mark_set
12268                               (((const struct rte_flow_action_mark *)
12269                                (actions->conf))->id);
12270                         MLX5_ASSERT(!handle->dvh.rix_tag);
12271                         if (flow_dv_tag_resource_register(dev, tag_be,
12272                                                           dev_flow, error))
12273                                 return -rte_errno;
12274                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12275                         dev_flow->dv.actions[actions_n++] =
12276                                         dev_flow->dv.tag_resource->action;
12277                         break;
12278                 case RTE_FLOW_ACTION_TYPE_SET_META:
12279                         if (flow_dv_convert_action_set_meta
12280                                 (dev, mhdr_res, attr,
12281                                  (const struct rte_flow_action_set_meta *)
12282                                   actions->conf, error))
12283                                 return -rte_errno;
12284                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12285                         break;
12286                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12287                         if (flow_dv_convert_action_set_tag
12288                                 (dev, mhdr_res,
12289                                  (const struct rte_flow_action_set_tag *)
12290                                   actions->conf, error))
12291                                 return -rte_errno;
12292                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12293                         break;
12294                 case RTE_FLOW_ACTION_TYPE_DROP:
12295                         action_flags |= MLX5_FLOW_ACTION_DROP;
12296                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12297                         break;
12298                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12299                         queue = actions->conf;
12300                         rss_desc->queue_num = 1;
12301                         rss_desc->queue[0] = queue->index;
12302                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12303                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12304                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12305                         num_of_dest++;
12306                         break;
12307                 case RTE_FLOW_ACTION_TYPE_RSS:
12308                         rss = actions->conf;
12309                         memcpy(rss_desc->queue, rss->queue,
12310                                rss->queue_num * sizeof(uint16_t));
12311                         rss_desc->queue_num = rss->queue_num;
12312                         /* NULL RSS key indicates default RSS key. */
12313                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12314                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12315                         /*
12316                          * rss->level and rss->types should be set in advance
12317                          * when expanding items for RSS.
12318                          */
12319                         action_flags |= MLX5_FLOW_ACTION_RSS;
12320                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12321                                 MLX5_FLOW_FATE_SHARED_RSS :
12322                                 MLX5_FLOW_FATE_QUEUE;
12323                         break;
12324                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12325                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12326                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12327                         __atomic_fetch_add(&age_act->refcnt, 1,
12328                                            __ATOMIC_RELAXED);
12329                         age_act_pos = actions_n++;
12330                         action_flags |= MLX5_FLOW_ACTION_AGE;
12331                         break;
12332                 case RTE_FLOW_ACTION_TYPE_AGE:
12333                         non_shared_age = action->conf;
12334                         age_act_pos = actions_n++;
12335                         action_flags |= MLX5_FLOW_ACTION_AGE;
12336                         break;
12337                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12338                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12339                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12340                                                              NULL);
12341                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12342                                            __ATOMIC_RELAXED);
12343                         /* Save information first, will apply later. */
12344                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12345                         break;
12346                 case RTE_FLOW_ACTION_TYPE_COUNT:
12347                         if (!dev_conf->devx) {
12348                                 return rte_flow_error_set
12349                                               (error, ENOTSUP,
12350                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12351                                                NULL,
12352                                                "count action not supported");
12353                         }
12354                         /* Save information first, will apply later. */
12355                         count = action->conf;
12356                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12357                         break;
12358                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12359                         dev_flow->dv.actions[actions_n++] =
12360                                                 priv->sh->pop_vlan_action;
12361                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12362                         break;
12363                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12364                         if (!(action_flags &
12365                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12366                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12367                         vlan.eth_proto = rte_be_to_cpu_16
12368                              ((((const struct rte_flow_action_of_push_vlan *)
12369                                                    actions->conf)->ethertype));
12370                         found_action = mlx5_flow_find_action
12371                                         (actions + 1,
12372                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12373                         if (found_action)
12374                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12375                         found_action = mlx5_flow_find_action
12376                                         (actions + 1,
12377                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12378                         if (found_action)
12379                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12380                         if (flow_dv_create_action_push_vlan
12381                                             (dev, attr, &vlan, dev_flow, error))
12382                                 return -rte_errno;
12383                         dev_flow->dv.actions[actions_n++] =
12384                                         dev_flow->dv.push_vlan_res->action;
12385                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12386                         break;
12387                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12388                         /* The OF_PUSH_VLAN action has already handled this one. */
12389                         MLX5_ASSERT(action_flags &
12390                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12391                         break;
12392                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12393                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12394                                 break;
12395                         flow_dev_get_vlan_info_from_items(items, &vlan);
12396                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12397                         /* Without a VLAN push, this is a modify header action. */
12398                         if (flow_dv_convert_action_modify_vlan_vid
12399                                                 (mhdr_res, actions, error))
12400                                 return -rte_errno;
12401                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12402                         break;
12403                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12404                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12405                         if (flow_dv_create_action_l2_encap(dev, actions,
12406                                                            dev_flow,
12407                                                            attr->transfer,
12408                                                            error))
12409                                 return -rte_errno;
12410                         dev_flow->dv.actions[actions_n++] =
12411                                         dev_flow->dv.encap_decap->action;
12412                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12413                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12414                                 sample_act->action_flags |=
12415                                                         MLX5_FLOW_ACTION_ENCAP;
12416                         break;
12417                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12418                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12419                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12420                                                            attr->transfer,
12421                                                            error))
12422                                 return -rte_errno;
12423                         dev_flow->dv.actions[actions_n++] =
12424                                         dev_flow->dv.encap_decap->action;
12425                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12426                         break;
12427                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12428                         /* Handle encap with preceding decap. */
12429                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12430                                 if (flow_dv_create_action_raw_encap
12431                                         (dev, actions, dev_flow, attr, error))
12432                                         return -rte_errno;
12433                                 dev_flow->dv.actions[actions_n++] =
12434                                         dev_flow->dv.encap_decap->action;
12435                         } else {
12436                                 /* Handle encap without preceding decap. */
12437                                 if (flow_dv_create_action_l2_encap
12438                                     (dev, actions, dev_flow, attr->transfer,
12439                                      error))
12440                                         return -rte_errno;
12441                                 dev_flow->dv.actions[actions_n++] =
12442                                         dev_flow->dv.encap_decap->action;
12443                         }
12444                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12445                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12446                                 sample_act->action_flags |=
12447                                                         MLX5_FLOW_ACTION_ENCAP;
12448                         break;
12449                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
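                        /* Skip VOID actions to find what follows the decap. */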
12450                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12451                                 ;
12452                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12453                                 if (flow_dv_create_action_l2_decap
12454                                     (dev, dev_flow, attr->transfer, error))
12455                                         return -rte_errno;
12456                                 dev_flow->dv.actions[actions_n++] =
12457                                         dev_flow->dv.encap_decap->action;
12458                         }
12459                         /* If decap is followed by encap, handle it at encap. */
12460                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12461                         break;
12462                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12463                         dev_flow->dv.actions[actions_n++] =
12464                                 (void *)(uintptr_t)action->conf;
12465                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12466                         break;
12467                 case RTE_FLOW_ACTION_TYPE_JUMP:
12468                         jump_group = ((const struct rte_flow_action_jump *)
12469                                                         action->conf)->group;
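                        /*
                         * The jump group is converted to an internal table ID
                         * below; scaling may be skipped per sub-flow.
                         */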
12470                         grp_info.std_tbl_fix = 0;
12471                         if (dev_flow->skip_scale &
12472                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12473                                 grp_info.skip_scale = 1;
12474                         else
12475                                 grp_info.skip_scale = 0;
12476                         ret = mlx5_flow_group_to_table(dev, tunnel,
12477                                                        jump_group,
12478                                                        &table,
12479                                                        &grp_info, error);
12480                         if (ret)
12481                                 return ret;
12482                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12483                                                        attr->transfer,
12484                                                        !!dev_flow->external,
12485                                                        tunnel, jump_group, 0,
12486                                                        0, error);
12487                         if (!tbl)
12488                                 return rte_flow_error_set
12489                                                 (error, errno,
12490                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12491                                                  NULL,
12492                                                  "cannot create jump action.");
12493                         if (flow_dv_jump_tbl_resource_register
12494                             (dev, tbl, dev_flow, error)) {
12495                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12496                                 return rte_flow_error_set
12497                                                 (error, errno,
12498                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12499                                                  NULL,
12500                                                  "cannot create jump action.");
12501                         }
12502                         dev_flow->dv.actions[actions_n++] =
12503                                         dev_flow->dv.jump->action;
12504                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12505                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12506                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12507                         num_of_dest++;
12508                         break;
12509                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12510                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12511                         if (flow_dv_convert_action_modify_mac
12512                                         (mhdr_res, actions, error))
12513                                 return -rte_errno;
12514                         action_flags |= actions->type ==
12515                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12516                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12517                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12518                         break;
12519                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12520                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12521                         if (flow_dv_convert_action_modify_ipv4
12522                                         (mhdr_res, actions, error))
12523                                 return -rte_errno;
12524                         action_flags |= actions->type ==
12525                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12526                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12527                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12528                         break;
12529                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12530                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12531                         if (flow_dv_convert_action_modify_ipv6
12532                                         (mhdr_res, actions, error))
12533                                 return -rte_errno;
12534                         action_flags |= actions->type ==
12535                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12536                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12537                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12538                         break;
12539                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12540                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12541                         if (flow_dv_convert_action_modify_tp
12542                                         (mhdr_res, actions, items,
12543                                          &flow_attr, dev_flow, !!(action_flags &
12544                                          MLX5_FLOW_ACTION_DECAP), error))
12545                                 return -rte_errno;
12546                         action_flags |= actions->type ==
12547                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12548                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12549                                         MLX5_FLOW_ACTION_SET_TP_DST;
12550                         break;
12551                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12552                         if (flow_dv_convert_action_modify_dec_ttl
12553                                         (mhdr_res, items, &flow_attr, dev_flow,
12554                                          !!(action_flags &
12555                                          MLX5_FLOW_ACTION_DECAP), error))
12556                                 return -rte_errno;
12557                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12558                         break;
12559                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12560                         if (flow_dv_convert_action_modify_ttl
12561                                         (mhdr_res, actions, items, &flow_attr,
12562                                          dev_flow, !!(action_flags &
12563                                          MLX5_FLOW_ACTION_DECAP), error))
12564                                 return -rte_errno;
12565                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12566                         break;
12567                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12568                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12569                         if (flow_dv_convert_action_modify_tcp_seq
12570                                         (mhdr_res, actions, error))
12571                                 return -rte_errno;
12572                         action_flags |= actions->type ==
12573                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12574                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12575                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12576                         break;
12577
12578                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12579                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12580                         if (flow_dv_convert_action_modify_tcp_ack
12581                                         (mhdr_res, actions, error))
12582                                 return -rte_errno;
12583                         action_flags |= actions->type ==
12584                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12585                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12586                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12587                         break;
12588                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12589                         if (flow_dv_convert_action_set_reg
12590                                         (mhdr_res, actions, error))
12591                                 return -rte_errno;
12592                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12593                         break;
12594                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12595                         if (flow_dv_convert_action_copy_mreg
12596                                         (dev, mhdr_res, actions, error))
12597                                 return -rte_errno;
12598                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12599                         break;
12600                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12601                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12602                         dev_flow->handle->fate_action =
12603                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12604                         break;
12605                 case RTE_FLOW_ACTION_TYPE_METER:
12606                         if (!wks->fm)
12607                                 return rte_flow_error_set(error, rte_errno,
12608                                         RTE_FLOW_ERROR_TYPE_ACTION,
12609                                         NULL, "Failed to get meter in flow.");
12610                         /* Set the meter action. */
12611                         dev_flow->dv.actions[actions_n++] =
12612                                 wks->fm->meter_action;
12613                         action_flags |= MLX5_FLOW_ACTION_METER;
12614                         break;
12615                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12616                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12617                                                               actions, error))
12618                                 return -rte_errno;
12619                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12620                         break;
12621                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12622                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
12623                                                               actions, error))
12624                                 return -rte_errno;
12625                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
12626                         break;
12627                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
12628                         sample_act_pos = actions_n;
12629                         sample = (const struct rte_flow_action_sample *)
12630                                  action->conf;
12631                         actions_n++;
12632                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
12633                         /* Move the encap action into the sample group when used with port ID. */
12634                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
12635                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
12636                                 sample_act->action_flags |=
12637                                                         MLX5_FLOW_ACTION_ENCAP;
12638                         break;
12639                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
12640                         if (flow_dv_convert_action_modify_field
12641                                         (dev, mhdr_res, actions, attr, error))
12642                                 return -rte_errno;
12643                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
12644                         break;
12645                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12646                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12647                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
12648                         if (!ct)
12649                                 return rte_flow_error_set(error, EINVAL,
12650                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12651                                                 NULL,
12652                                                 "Failed to get CT object.");
12653                         if (mlx5_aso_ct_available(priv->sh, ct))
12654                                 return rte_flow_error_set(error, rte_errno,
12655                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12656                                                 NULL,
12657                                                 "CT is unavailable.");
12658                         if (ct->is_original)
12659                                 dev_flow->dv.actions[actions_n] =
12660                                                         ct->dr_action_orig;
12661                         else
12662                                 dev_flow->dv.actions[actions_n] =
12663                                                         ct->dr_action_rply;
12664                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
12665                         flow->ct = owner_idx;
12666                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
12667                         actions_n++;
12668                         action_flags |= MLX5_FLOW_ACTION_CT;
12669                         break;
12670                 case RTE_FLOW_ACTION_TYPE_END:
12671                         actions_end = true;
12672                         if (mhdr_res->actions_num) {
12673                                 /* create modify action if needed. */
12674                                 if (flow_dv_modify_hdr_resource_register
12675                                         (dev, mhdr_res, dev_flow, error))
12676                                         return -rte_errno;
12677                                 dev_flow->dv.actions[modify_action_position] =
12678                                         handle->dvh.modify_hdr->action;
12679                         }
12680                         /*
12681                          * Handle the AGE and COUNT actions by a single HW
12682                          * counter when they are not shared.
12683                          */
12684                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
12685                                 if ((non_shared_age &&
12686                                      count && !count->shared) ||
12687                                     !(priv->sh->flow_hit_aso_en &&
12688                                       (attr->group || attr->transfer))) {
12689                                         /* Create aging based on counters. */
12690                                         cnt_act = flow_dv_prepare_counter
12691                                                                 (dev, dev_flow,
12692                                                                  flow, count,
12693                                                                  non_shared_age,
12694                                                                  error);
12695                                         if (!cnt_act)
12696                                                 return -rte_errno;
12697                                         dev_flow->dv.actions[age_act_pos] =
12698                                                                 cnt_act->action;
12699                                         break;
12700                                 }
12701                                 if (!flow->age && non_shared_age) {
12702                                         flow->age = flow_dv_aso_age_alloc
12703                                                                 (dev, error);
12704                                         if (!flow->age)
12705                                                 return -rte_errno;
12706                                         flow_dv_aso_age_params_init
12707                                                     (dev, flow->age,
12708                                                      non_shared_age->context ?
12709                                                      non_shared_age->context :
12710                                                      (void *)(uintptr_t)
12711                                                      (dev_flow->flow_idx),
12712                                                      non_shared_age->timeout);
12713                                 }
12714                                 age_act = flow_aso_age_get_by_idx(dev,
12715                                                                   flow->age);
12716                                 dev_flow->dv.actions[age_act_pos] =
12717                                                              age_act->dr_action;
12718                         }
12719                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
12720                                 /*
12721                                  * Create one count action, to be used
12722                                  * by all sub-flows.
12723                                  */
12724                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
12725                                                                   flow, count,
12726                                                                   NULL, error);
12727                                 if (!cnt_act)
12728                                         return -rte_errno;
12729                                 dev_flow->dv.actions[actions_n++] =
12730                                                                 cnt_act->action;
12731                         }
12732                 default:
12733                         break;
12734                 }
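                /*
                 * Reserve the slot for the modify-header action at the
                 * position of the first action that needs it; the actual DV
                 * action is filled in at RTE_FLOW_ACTION_TYPE_END above.
                 */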
12735                 if (mhdr_res->actions_num &&
12736                     modify_action_position == UINT32_MAX)
12737                         modify_action_position = actions_n++;
12738         }
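        /*
         * Translate each pattern item into the matcher: items fill the match
         * mask/value buffers, adjust the matcher priority and record the
         * matched layer in last_item.
         */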
12739         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
12740                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
12741                 int item_type = items->type;
12742
12743                 if (!mlx5_flow_os_item_supported(item_type))
12744                         return rte_flow_error_set(error, ENOTSUP,
12745                                                   RTE_FLOW_ERROR_TYPE_ITEM,
12746                                                   NULL, "item not supported");
12747                 switch (item_type) {
12748                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
12749                         flow_dv_translate_item_port_id
12750                                 (dev, match_mask, match_value, items, attr);
12751                         last_item = MLX5_FLOW_ITEM_PORT_ID;
12752                         break;
12753                 case RTE_FLOW_ITEM_TYPE_ETH:
12754                         flow_dv_translate_item_eth(match_mask, match_value,
12755                                                    items, tunnel,
12756                                                    dev_flow->dv.group);
12757                         matcher.priority = action_flags &
12758                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
12759                                         !dev_flow->external ?
12760                                         MLX5_PRIORITY_MAP_L3 :
12761                                         MLX5_PRIORITY_MAP_L2;
12762                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
12763                                              MLX5_FLOW_LAYER_OUTER_L2;
12764                         break;
12765                 case RTE_FLOW_ITEM_TYPE_VLAN:
12766                         flow_dv_translate_item_vlan(dev_flow,
12767                                                     match_mask, match_value,
12768                                                     items, tunnel,
12769                                                     dev_flow->dv.group);
12770                         matcher.priority = MLX5_PRIORITY_MAP_L2;
12771                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
12772                                               MLX5_FLOW_LAYER_INNER_VLAN) :
12773                                              (MLX5_FLOW_LAYER_OUTER_L2 |
12774                                               MLX5_FLOW_LAYER_OUTER_VLAN);
12775                         break;
12776                 case RTE_FLOW_ITEM_TYPE_IPV4:
12777                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12778                                                   &item_flags, &tunnel);
12779                         flow_dv_translate_item_ipv4(match_mask, match_value,
12780                                                     items, tunnel,
12781                                                     dev_flow->dv.group);
12782                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12783                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
12784                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
12785                         if (items->mask != NULL &&
12786                             ((const struct rte_flow_item_ipv4 *)
12787                              items->mask)->hdr.next_proto_id) {
12788                                 next_protocol =
12789                                         ((const struct rte_flow_item_ipv4 *)
12790                                          (items->spec))->hdr.next_proto_id;
12791                                 next_protocol &=
12792                                         ((const struct rte_flow_item_ipv4 *)
12793                                          (items->mask))->hdr.next_proto_id;
12794                         } else {
12795                                 /* Reset for inner layer. */
12796                                 next_protocol = 0xff;
12797                         }
12798                         break;
12799                 case RTE_FLOW_ITEM_TYPE_IPV6:
12800                         mlx5_flow_tunnel_ip_check(items, next_protocol,
12801                                                   &item_flags, &tunnel);
12802                         flow_dv_translate_item_ipv6(match_mask, match_value,
12803                                                     items, tunnel,
12804                                                     dev_flow->dv.group);
12805                         matcher.priority = MLX5_PRIORITY_MAP_L3;
12806                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
12807                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
12808                         if (items->mask != NULL &&
12809                             ((const struct rte_flow_item_ipv6 *)
12810                              items->mask)->hdr.proto) {
12811                                 next_protocol =
12812                                         ((const struct rte_flow_item_ipv6 *)
12813                                          items->spec)->hdr.proto;
12814                                 next_protocol &=
12815                                         ((const struct rte_flow_item_ipv6 *)
12816                                          items->mask)->hdr.proto;
12817                         } else {
12818                                 /* Reset for inner layer. */
12819                                 next_protocol = 0xff;
12820                         }
12821                         break;
12822                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
12823                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
12824                                                              match_value,
12825                                                              items, tunnel);
12826                         last_item = tunnel ?
12827                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
12828                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
12829                         if (items->mask != NULL &&
12830                             ((const struct rte_flow_item_ipv6_frag_ext *)
12831                              items->mask)->hdr.next_header) {
12832                                 next_protocol =
12833                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12834                                  items->spec)->hdr.next_header;
12835                                 next_protocol &=
12836                                 ((const struct rte_flow_item_ipv6_frag_ext *)
12837                                  items->mask)->hdr.next_header;
12838                         } else {
12839                                 /* Reset for inner layer. */
12840                                 next_protocol = 0xff;
12841                         }
12842                         break;
12843                 case RTE_FLOW_ITEM_TYPE_TCP:
12844                         flow_dv_translate_item_tcp(match_mask, match_value,
12845                                                    items, tunnel);
12846                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12847                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
12848                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
12849                         break;
12850                 case RTE_FLOW_ITEM_TYPE_UDP:
12851                         flow_dv_translate_item_udp(match_mask, match_value,
12852                                                    items, tunnel);
12853                         matcher.priority = MLX5_PRIORITY_MAP_L4;
12854                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
12855                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
12856                         break;
12857                 case RTE_FLOW_ITEM_TYPE_GRE:
12858                         flow_dv_translate_item_gre(match_mask, match_value,
12859                                                    items, tunnel);
12860                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12861                         last_item = MLX5_FLOW_LAYER_GRE;
12862                         break;
12863                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
12864                         flow_dv_translate_item_gre_key(match_mask,
12865                                                        match_value, items);
12866                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
12867                         break;
12868                 case RTE_FLOW_ITEM_TYPE_NVGRE:
12869                         flow_dv_translate_item_nvgre(match_mask, match_value,
12870                                                      items, tunnel);
12871                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12872                         last_item = MLX5_FLOW_LAYER_GRE;
12873                         break;
12874                 case RTE_FLOW_ITEM_TYPE_VXLAN:
12875                         flow_dv_translate_item_vxlan(match_mask, match_value,
12876                                                      items, tunnel);
12877                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12878                         last_item = MLX5_FLOW_LAYER_VXLAN;
12879                         break;
12880                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
12881                         flow_dv_translate_item_vxlan_gpe(match_mask,
12882                                                          match_value, items,
12883                                                          tunnel);
12884                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12885                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
12886                         break;
12887                 case RTE_FLOW_ITEM_TYPE_GENEVE:
12888                         flow_dv_translate_item_geneve(match_mask, match_value,
12889                                                       items, tunnel);
12890                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12891                         last_item = MLX5_FLOW_LAYER_GENEVE;
12892                         break;
12893                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
12894                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
12895                                                           match_value,
12896                                                           items, error);
12897                         if (ret)
12898                                 return rte_flow_error_set(error, -ret,
12899                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12900                                         "cannot create GENEVE TLV option");
12901                         flow->geneve_tlv_option = 1;
12902                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
12903                         break;
12904                 case RTE_FLOW_ITEM_TYPE_MPLS:
12905                         flow_dv_translate_item_mpls(match_mask, match_value,
12906                                                     items, last_item, tunnel);
12907                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12908                         last_item = MLX5_FLOW_LAYER_MPLS;
12909                         break;
12910                 case RTE_FLOW_ITEM_TYPE_MARK:
12911                         flow_dv_translate_item_mark(dev, match_mask,
12912                                                     match_value, items);
12913                         last_item = MLX5_FLOW_ITEM_MARK;
12914                         break;
12915                 case RTE_FLOW_ITEM_TYPE_META:
12916                         flow_dv_translate_item_meta(dev, match_mask,
12917                                                     match_value, attr, items);
12918                         last_item = MLX5_FLOW_ITEM_METADATA;
12919                         break;
12920                 case RTE_FLOW_ITEM_TYPE_ICMP:
12921                         flow_dv_translate_item_icmp(match_mask, match_value,
12922                                                     items, tunnel);
12923                         last_item = MLX5_FLOW_LAYER_ICMP;
12924                         break;
12925                 case RTE_FLOW_ITEM_TYPE_ICMP6:
12926                         flow_dv_translate_item_icmp6(match_mask, match_value,
12927                                                       items, tunnel);
12928                         last_item = MLX5_FLOW_LAYER_ICMP6;
12929                         break;
12930                 case RTE_FLOW_ITEM_TYPE_TAG:
12931                         flow_dv_translate_item_tag(dev, match_mask,
12932                                                    match_value, items);
12933                         last_item = MLX5_FLOW_ITEM_TAG;
12934                         break;
12935                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
12936                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
12937                                                         match_value, items);
12938                         last_item = MLX5_FLOW_ITEM_TAG;
12939                         break;
12940                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
12941                         flow_dv_translate_item_tx_queue(dev, match_mask,
12942                                                         match_value,
12943                                                         items);
12944                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
12945                         break;
12946                 case RTE_FLOW_ITEM_TYPE_GTP:
12947                         flow_dv_translate_item_gtp(match_mask, match_value,
12948                                                    items, tunnel);
12949                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
12950                         last_item = MLX5_FLOW_LAYER_GTP;
12951                         break;
12952                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
12953                         ret = flow_dv_translate_item_gtp_psc(match_mask,
12954                                                           match_value,
12955                                                           items);
12956                         if (ret)
12957                                 return rte_flow_error_set(error, -ret,
12958                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
12959                                         "cannot create GTP PSC item");
12960                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
12961                         break;
12962                 case RTE_FLOW_ITEM_TYPE_ECPRI:
12963                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
12964                                 /* Create it only the first time it is used. */
12965                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
12966                                 if (ret)
12967                                         return rte_flow_error_set
12968                                                 (error, -ret,
12969                                                 RTE_FLOW_ERROR_TYPE_ITEM,
12970                                                 NULL,
12971                                                 "cannot create eCPRI parser");
12972                         }
12973                         /* Adjust the matcher mask size and the device flow value size. */
12974                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
12975                         dev_flow->dv.value.size =
12976                                         MLX5_ST_SZ_BYTES(fte_match_param);
12977                         flow_dv_translate_item_ecpri(dev, match_mask,
12978                                                      match_value, items);
12979                         /* No other protocol should follow eCPRI layer. */
12980                         last_item = MLX5_FLOW_LAYER_ECPRI;
12981                         break;
12982                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
12983                         flow_dv_translate_item_integrity(match_mask,
12984                                                          match_value,
12985                                                          head_item, items);
12986                         break;
12987                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
12988                         flow_dv_translate_item_aso_ct(dev, match_mask,
12989                                                       match_value, items);
12990                         break;
12991                 default:
12992                         break;
12993                 }
12994                 item_flags |= last_item;
12995         }
12996         /*
12997          * When E-Switch mode is enabled, there are two cases where the
12998          * source port must be set manually.
12999          * The first one is a NIC steering rule; the second is an E-Switch
13000          * rule in which no port_id item was found. In both cases the
13001          * source port is set according to the current port in use.
13002          */
13003         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13004             (priv->representor || priv->master)) {
13005                 if (flow_dv_translate_item_port_id(dev, match_mask,
13006                                                    match_value, NULL, attr))
13007                         return -rte_errno;
13008         }
13009 #ifdef RTE_LIBRTE_MLX5_DEBUG
13010         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13011                                               dev_flow->dv.value.buf));
13012 #endif
13013         /*
13014          * Layers may be already initialized from prefix flow if this dev_flow
13015          * is the suffix flow.
13016          */
13017         handle->layers |= item_flags;
13018         if (action_flags & MLX5_FLOW_ACTION_RSS)
13019                 flow_dv_hashfields_set(dev_flow, rss_desc);
13020         /* If the sample action contains an RSS action, the Sample/Mirror
13021          * resource should be registered after the hash fields are updated.
13022          */
13023         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13024                 ret = flow_dv_translate_action_sample(dev,
13025                                                       sample,
13026                                                       dev_flow, attr,
13027                                                       &num_of_dest,
13028                                                       sample_actions,
13029                                                       &sample_res,
13030                                                       error);
13031                 if (ret < 0)
13032                         return ret;
13033                 ret = flow_dv_create_action_sample(dev,
13034                                                    dev_flow,
13035                                                    num_of_dest,
13036                                                    &sample_res,
13037                                                    &mdest_res,
13038                                                    sample_actions,
13039                                                    action_flags,
13040                                                    error);
13041                 if (ret < 0)
13042                         return rte_flow_error_set
13043                                                 (error, rte_errno,
13044                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13045                                                 NULL,
13046                                                 "cannot create sample action");
13047                 if (num_of_dest > 1) {
13048                         dev_flow->dv.actions[sample_act_pos] =
13049                         dev_flow->dv.dest_array_res->action;
13050                 } else {
13051                         dev_flow->dv.actions[sample_act_pos] =
13052                         dev_flow->dv.sample_res->verbs_action;
13053                 }
13054         }
13055         /*
13056          * For multiple destinations (sample action with ratio=1), the
13057          * encap action and the port_id action are combined into a group
13058          * action, so the original actions must be removed from the flow,
13059          * and only the sample action is used instead.
13060          */
13061         if (num_of_dest > 1 &&
13062             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13063                 int i;
13064                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13065
13066                 for (i = 0; i < actions_n; i++) {
13067                         if ((sample_act->dr_encap_action &&
13068                                 sample_act->dr_encap_action ==
13069                                 dev_flow->dv.actions[i]) ||
13070                                 (sample_act->dr_port_id_action &&
13071                                 sample_act->dr_port_id_action ==
13072                                 dev_flow->dv.actions[i]) ||
13073                                 (sample_act->dr_jump_action &&
13074                                 sample_act->dr_jump_action ==
13075                                 dev_flow->dv.actions[i]))
13076                                 continue;
13077                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13078                 }
13079                 memcpy((void *)dev_flow->dv.actions,
13080                                 (void *)temp_actions,
13081                                 tmp_actions_n * sizeof(void *));
13082                 actions_n = tmp_actions_n;
13083         }
13084         dev_flow->dv.actions_n = actions_n;
13085         dev_flow->act_flags = action_flags;
13086         if (wks->skip_matcher_reg)
13087                 return 0;
13088         /* Register matcher. */
13089         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13090                                     matcher.mask.size);
13091         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13092                                         matcher.priority);
13093         /* The reserved field does not need to be set to 0 here. */
13094         tbl_key.is_fdb = attr->transfer;
13095         tbl_key.is_egress = attr->egress;
13096         tbl_key.level = dev_flow->dv.group;
13097         tbl_key.id = dev_flow->dv.table_id;
13098         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13099                                      tunnel, attr->group, error))
13100                 return -rte_errno;
13101         return 0;
13102 }
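
/*
 * The matcher is cached per flow table: two rules share one hardware
 * matcher object when their table key (fdb/egress/level/id), mask CRC
 * and priority all match, which is why the CRC and the final priority
 * are computed just before flow_dv_matcher_register() is called above.
 */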
13103
13104 /**
13105  * Set a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13107  *
13108  * @param[in, out] action
13109  *   Shared RSS action holding hash RX queue objects.
13110  * @param[in] hash_fields
13111  *   Defines combination of packet fields to participate in RX hash.
13114  * @param[in] hrxq_idx
13115  *   Hash RX queue index to set.
13116  *
13117  * @return
13118  *   0 on success, otherwise negative errno value.
13119  */
13120 static int
13121 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13122                               const uint64_t hash_fields,
13123                               uint32_t hrxq_idx)
13124 {
13125         uint32_t *hrxqs = action->hrxq;
13126
13127         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13128         case MLX5_RSS_HASH_IPV4:
13129                 /* fall-through. */
13130         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13131                 /* fall-through. */
13132         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13133                 hrxqs[0] = hrxq_idx;
13134                 return 0;
13135         case MLX5_RSS_HASH_IPV4_TCP:
13136                 /* fall-through. */
13137         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13138                 /* fall-through. */
13139         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13140                 hrxqs[1] = hrxq_idx;
13141                 return 0;
13142         case MLX5_RSS_HASH_IPV4_UDP:
13143                 /* fall-through. */
13144         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13145                 /* fall-through. */
13146         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13147                 hrxqs[2] = hrxq_idx;
13148                 return 0;
13149         case MLX5_RSS_HASH_IPV6:
13150                 /* fall-through. */
13151         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13152                 /* fall-through. */
13153         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13154                 hrxqs[3] = hrxq_idx;
13155                 return 0;
13156         case MLX5_RSS_HASH_IPV6_TCP:
13157                 /* fall-through. */
13158         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13159                 /* fall-through. */
13160         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13161                 hrxqs[4] = hrxq_idx;
13162                 return 0;
13163         case MLX5_RSS_HASH_IPV6_UDP:
13164                 /* fall-through. */
13165         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13166                 /* fall-through. */
13167         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13168                 hrxqs[5] = hrxq_idx;
13169                 return 0;
13170         case MLX5_RSS_HASH_NONE:
13171                 hrxqs[6] = hrxq_idx;
13172                 return 0;
13173         default:
13174                 return -1;
13175         }
13176 }
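
/*
 * Resulting slot layout, as implied by the switch above:
 *
 *     hrxqs[0] - L3 IPv4        hrxqs[3] - L3 IPv6
 *     hrxqs[1] - IPv4/TCP       hrxqs[4] - IPv6/TCP
 *     hrxqs[2] - IPv4/UDP       hrxqs[5] - IPv6/UDP
 *     hrxqs[6] - MLX5_RSS_HASH_NONE
 *
 * __flow_dv_action_rss_hrxq_lookup() below indexes the same way, so the
 * two switch statements must be kept in sync.
 */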
13177
13178 /**
13179  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13181  *
13182  * @param[in] dev
13183  *   Pointer to the Ethernet device structure.
13184  * @param[in] idx
13185  *   Shared RSS action ID holding hash RX queue objects.
13186  * @param[in] hash_fields
13187  *   Defines combination of packet fields to participate in RX hash.
13190  *
13191  * @return
13192  *   Valid hash RX queue index, otherwise 0.
13193  */
13194 static uint32_t
13195 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13196                                  const uint64_t hash_fields)
13197 {
13198         struct mlx5_priv *priv = dev->data->dev_private;
13199         struct mlx5_shared_action_rss *shared_rss =
13200             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13201         const uint32_t *hrxqs = shared_rss->hrxq;
13202
13203         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13204         case MLX5_RSS_HASH_IPV4:
13205                 /* fall-through. */
13206         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13207                 /* fall-through. */
13208         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13209                 return hrxqs[0];
13210         case MLX5_RSS_HASH_IPV4_TCP:
13211                 /* fall-through. */
13212         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13213                 /* fall-through. */
13214         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13215                 return hrxqs[1];
13216         case MLX5_RSS_HASH_IPV4_UDP:
13217                 /* fall-through. */
13218         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13219                 /* fall-through. */
13220         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13221                 return hrxqs[2];
13222         case MLX5_RSS_HASH_IPV6:
13223                 /* fall-through. */
13224         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13225                 /* fall-through. */
13226         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13227                 return hrxqs[3];
13228         case MLX5_RSS_HASH_IPV6_TCP:
13229                 /* fall-through. */
13230         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13231                 /* fall-through. */
13232         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13233                 return hrxqs[4];
13234         case MLX5_RSS_HASH_IPV6_UDP:
13235                 /* fall-through. */
13236         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13237                 /* fall-through. */
13238         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13239                 return hrxqs[5];
13240         case MLX5_RSS_HASH_NONE:
13241                 return hrxqs[6];
13242         default:
13243                 return 0;
13244         }
13246 }
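
/*
 * Minimal usage sketch (mirroring the shared-RSS path in flow_dv_apply()
 * below, with hypothetical local variables):
 *
 *     uint32_t hrxq_idx = __flow_dv_action_rss_hrxq_lookup
 *             (dev, rss_desc->shared_rss, dev_flow->hash_fields);
 *     struct mlx5_hrxq *hrxq = hrxq_idx ? mlx5_ipool_get
 *             (priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx) : NULL;
 */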
13247
13248 /**
13249  * Apply the flow to the NIC, lock free
13250  * (the mutex should be acquired by the caller).
13251  *
13252  * @param[in] dev
13253  *   Pointer to the Ethernet device structure.
13254  * @param[in, out] flow
13255  *   Pointer to flow structure.
13256  * @param[out] error
13257  *   Pointer to error structure.
13258  *
13259  * @return
13260  *   0 on success, a negative errno value otherwise and rte_errno is set.
13261  */
13262 static int
13263 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13264               struct rte_flow_error *error)
13265 {
13266         struct mlx5_flow_dv_workspace *dv;
13267         struct mlx5_flow_handle *dh;
13268         struct mlx5_flow_handle_dv *dv_h;
13269         struct mlx5_flow *dev_flow;
13270         struct mlx5_priv *priv = dev->data->dev_private;
13271         uint32_t handle_idx;
13272         int n;
13273         int err;
13274         int idx;
13275         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13276         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13277
13278         MLX5_ASSERT(wks);
13279         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13280                 dev_flow = &wks->flows[idx];
13281                 dv = &dev_flow->dv;
13282                 dh = dev_flow->handle;
13283                 dv_h = &dh->dvh;
13284                 n = dv->actions_n;
13285                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13286                         if (dv->transfer) {
13287                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13288                                 dv->actions[n++] = priv->sh->dr_drop_action;
13289                         } else {
13290 #ifdef HAVE_MLX5DV_DR
13291                                 /* DR supports drop action placeholder. */
13292                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13293                                 dv->actions[n++] = priv->sh->dr_drop_action;
13294 #else
13295                                 /* For DV we use the explicit drop queue. */
13296                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13297                                 dv->actions[n++] =
13298                                                 priv->drop_queue.hrxq->action;
13299 #endif
13300                         }
13301                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13302                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13303                         struct mlx5_hrxq *hrxq;
13304                         uint32_t hrxq_idx;
13305
13306                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13307                                                     &hrxq_idx);
13308                         if (!hrxq) {
13309                                 rte_flow_error_set
13310                                         (error, rte_errno,
13311                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13312                                          "cannot get hash queue");
13313                                 goto error;
13314                         }
13315                         dh->rix_hrxq = hrxq_idx;
13316                         dv->actions[n++] = hrxq->action;
13317                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13318                         struct mlx5_hrxq *hrxq = NULL;
13319                         uint32_t hrxq_idx;
13320
13321                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13322                                                 rss_desc->shared_rss,
13323                                                 dev_flow->hash_fields);
13324                         if (hrxq_idx)
13325                                 hrxq = mlx5_ipool_get
13326                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13327                                          hrxq_idx);
13328                         if (!hrxq) {
13329                                 rte_flow_error_set
13330                                         (error, rte_errno,
13331                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13332                                          "cannot get hash queue");
13333                                 goto error;
13334                         }
13335                         dh->rix_srss = rss_desc->shared_rss;
13336                         dv->actions[n++] = hrxq->action;
13337                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13338                         if (!priv->sh->default_miss_action) {
13339                                 rte_flow_error_set
13340                                         (error, rte_errno,
13341                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13342                                          "default miss action not created.");
13343                                 goto error;
13344                         }
13345                         dv->actions[n++] = priv->sh->default_miss_action;
13346                 }
13347                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13348                                                (void *)&dv->value, n,
13349                                                dv->actions, &dh->drv_flow);
13350                 if (err) {
13351                         rte_flow_error_set(error, errno,
13352                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13353                                            NULL,
13354                                            "hardware refuses to create flow");
13355                         goto error;
13356                 }
13357                 if (priv->vmwa_context &&
13358                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13359                         /*
13360                          * The rule contains the VLAN pattern.
13361                          * For VF we are going to create VLAN
13362                          * interface to make hypervisor set correct
13363                          * e-Switch vport context.
13364                          */
13365                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13366                 }
13367         }
13368         return 0;
13369 error:
13370         err = rte_errno; /* Save rte_errno before cleanup. */
13371         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13372                        handle_idx, dh, next) {
13373                 /* hrxq is a union; don't clear it if the flag is not set. */
13374                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13375                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13376                         dh->rix_hrxq = 0;
13377                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13378                         dh->rix_srss = 0;
13379                 }
13380                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13381                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13382         }
13383         rte_errno = err; /* Restore rte_errno. */
13384         return -rte_errno;
13385 }
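
/*
 * Summary of the fate dispatch in flow_dv_apply(), derived from the
 * branches above: each handle gets one terminal action,
 *
 *     MLX5_FLOW_FATE_DROP         -> dr_drop_action or drop queue hrxq,
 *     MLX5_FLOW_FATE_QUEUE        -> hrxq from flow_dv_hrxq_prepare(),
 *     MLX5_FLOW_FATE_SHARED_RSS   -> hrxq looked up in the shared action,
 *     MLX5_FLOW_FATE_DEFAULT_MISS -> sh->default_miss_action,
 *
 * before mlx5_flow_os_create_flow() installs the rule in hardware.
 */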
13386
13387 void
13388 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
13389                           struct mlx5_cache_entry *entry)
13390 {
13391         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
13392                                                           entry);
13393
13394         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
13395         mlx5_free(cache);
13396 }
13397
13398 /**
13399  * Release the flow matcher.
13400  *
13401  * @param dev
13402  *   Pointer to Ethernet device.
13403  * @param handle
13404  *   Pointer to mlx5_flow_handle.
13405  *
13406  * @return
13407  *   1 while a reference on it exists, 0 when freed.
13408  */
13409 static int
13410 flow_dv_matcher_release(struct rte_eth_dev *dev,
13411                         struct mlx5_flow_handle *handle)
13412 {
13413         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13414         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13415                                                             typeof(*tbl), tbl);
13416         int ret;
13417
13418         MLX5_ASSERT(matcher->matcher_object);
13419         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
13420         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13421         return ret;
13422 }
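
/*
 * The release path mirrors registration: the matcher reference is dropped
 * through the per-table cache list first, then the table reference taken
 * when the matcher was registered is released. The resource release
 * helpers below follow the same unregister-from-cache pattern.
 */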
13423
13424 /**
13425  * Release encap_decap resource.
13426  *
13427  * @param list
13428  *   Pointer to the hash list.
13429  * @param entry
13430  *   Pointer to the existing resource entry object.
13431  */
13432 void
13433 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
13434                               struct mlx5_hlist_entry *entry)
13435 {
13436         struct mlx5_dev_ctx_shared *sh = list->ctx;
13437         struct mlx5_flow_dv_encap_decap_resource *res =
13438                 container_of(entry, typeof(*res), entry);
13439
13440         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13441         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13442 }
13443
13444 /**
13445  * Release an encap/decap resource.
13446  *
13447  * @param dev
13448  *   Pointer to Ethernet device.
13449  * @param encap_decap_idx
13450  *   Index of encap decap resource.
13451  *
13452  * @return
13453  *   1 while a reference on it exists, 0 when freed.
13454  */
13455 static int
13456 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13457                                      uint32_t encap_decap_idx)
13458 {
13459         struct mlx5_priv *priv = dev->data->dev_private;
13460         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
13461
13462         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13463                                         encap_decap_idx);
13464         if (!cache_resource)
13465                 return 0;
13466         MLX5_ASSERT(cache_resource->action);
13467         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
13468                                      &cache_resource->entry);
13469 }
13470
13471 /**
13472  * Release a jump to table action resource.
13473  *
13474  * @param dev
13475  *   Pointer to Ethernet device.
13476  * @param rix_jump
13477  *   Index to the jump action resource.
13478  *
13479  * @return
13480  *   1 while a reference on it exists, 0 when freed.
13481  */
13482 static int
13483 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13484                                   uint32_t rix_jump)
13485 {
13486         struct mlx5_priv *priv = dev->data->dev_private;
13487         struct mlx5_flow_tbl_data_entry *tbl_data;
13488
13489         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13490                                   rix_jump);
13491         if (!tbl_data)
13492                 return 0;
13493         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13494 }
13495
13496 void
13497 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
13498                          struct mlx5_hlist_entry *entry)
13499 {
13500         struct mlx5_flow_dv_modify_hdr_resource *res =
13501                 container_of(entry, typeof(*res), entry);
13502
13503         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13504         mlx5_free(entry);
13505 }
13506
13507 /**
13508  * Release a modify-header resource.
13509  *
13510  * @param dev
13511  *   Pointer to Ethernet device.
13512  * @param handle
13513  *   Pointer to mlx5_flow_handle.
13514  *
13515  * @return
13516  *   1 while a reference on it exists, 0 when freed.
13517  */
13518 static int
13519 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13520                                     struct mlx5_flow_handle *handle)
13521 {
13522         struct mlx5_priv *priv = dev->data->dev_private;
13523         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13524
13525         MLX5_ASSERT(entry->action);
13526         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13527 }
13528
13529 void
13530 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
13531                           struct mlx5_cache_entry *entry)
13532 {
13533         struct mlx5_dev_ctx_shared *sh = list->ctx;
13534         struct mlx5_flow_dv_port_id_action_resource *cache =
13535                         container_of(entry, typeof(*cache), entry);
13536
13537         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13538         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
13539 }
13540
13541 /**
13542  * Release port ID action resource.
13543  *
13544  * @param dev
13545  *   Pointer to Ethernet device.
13546  * @param handle
13547  *   Pointer to mlx5_flow_handle.
13548  *
13549  * @return
13550  *   1 while a reference on it exists, 0 when freed.
13551  */
13552 static int
13553 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13554                                         uint32_t port_id)
13555 {
13556         struct mlx5_priv *priv = dev->data->dev_private;
13557         struct mlx5_flow_dv_port_id_action_resource *cache;
13558
13559         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13560         if (!cache)
13561                 return 0;
13562         MLX5_ASSERT(cache->action);
13563         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
13564                                      &cache->entry);
13565 }
13566
13567 /**
13568  * Release shared RSS action resource.
13569  *
13570  * @param dev
13571  *   Pointer to Ethernet device.
13572  * @param srss
13573  *   Shared RSS action index.
13574  */
13575 static void
13576 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13577 {
13578         struct mlx5_priv *priv = dev->data->dev_private;
13579         struct mlx5_shared_action_rss *shared_rss;
13580
13581         shared_rss = mlx5_ipool_get
13582                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13583         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13584 }
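
/*
 * Only the reference count is dropped here; the hash RX queues of the
 * shared action stay alive and are torn down when the indirect RSS
 * action itself is destroyed, which cannot happen while flows still
 * hold references to it.
 */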
13585
13586 void
13587 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
13588                             struct mlx5_cache_entry *entry)
13589 {
13590         struct mlx5_dev_ctx_shared *sh = list->ctx;
13591         struct mlx5_flow_dv_push_vlan_action_resource *cache =
13592                         container_of(entry, typeof(*cache), entry);
13593
13594         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
13595         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
13596 }
13597
13598 /**
13599  * Release push vlan action resource.
13600  *
13601  * @param dev
13602  *   Pointer to Ethernet device.
13603  * @param handle
13604  *   Pointer to mlx5_flow_handle.
13605  *
13606  * @return
13607  *   1 while a reference on it exists, 0 when freed.
13608  */
13609 static int
13610 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13611                                           struct mlx5_flow_handle *handle)
13612 {
13613         struct mlx5_priv *priv = dev->data->dev_private;
13614         struct mlx5_flow_dv_push_vlan_action_resource *cache;
13615         uint32_t idx = handle->dvh.rix_push_vlan;
13616
13617         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13618         if (!cache)
13619                 return 0;
13620         MLX5_ASSERT(cache->action);
13621         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
13622                                      &cache->entry);
13623 }
13624
13625 /**
13626  * Release the fate resource.
13627  *
13628  * @param dev
13629  *   Pointer to Ethernet device.
13630  * @param handle
13631  *   Pointer to mlx5_flow_handle.
13632  */
13633 static void
13634 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
13635                                struct mlx5_flow_handle *handle)
13636 {
13637         if (!handle->rix_fate)
13638                 return;
13639         switch (handle->fate_action) {
13640         case MLX5_FLOW_FATE_QUEUE:
13641                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
13642                         mlx5_hrxq_release(dev, handle->rix_hrxq);
13643                 break;
13644         case MLX5_FLOW_FATE_JUMP:
13645                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
13646                 break;
13647         case MLX5_FLOW_FATE_PORT_ID:
13648                 flow_dv_port_id_action_resource_release(dev,
13649                                 handle->rix_port_id_action);
13650                 break;
13651         default:
13652                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
13653                 break;
13654         }
13655         handle->rix_fate = 0;
13656 }
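
/*
 * handle->rix_fate overlays the per-fate indices (rix_hrxq, rix_jump,
 * rix_port_id_action) in a union, so the switch on fate_action above is
 * the only way to know which resource the index refers to; clearing
 * rix_fate without releasing through the right branch would leak it.
 */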
13657
13658 void
13659 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
13660                          struct mlx5_cache_entry *entry)
13661 {
13662         struct mlx5_flow_dv_sample_resource *cache_resource =
13663                         container_of(entry, typeof(*cache_resource), entry);
13664         struct rte_eth_dev *dev = cache_resource->dev;
13665         struct mlx5_priv *priv = dev->data->dev_private;
13666
13667         if (cache_resource->verbs_action)
13668                 claim_zero(mlx5_flow_os_destroy_flow_action
13669                                 (cache_resource->verbs_action));
13670         if (cache_resource->normal_path_tbl)
13671                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13672                         cache_resource->normal_path_tbl);
13673         flow_dv_sample_sub_actions_release(dev,
13674                                 &cache_resource->sample_idx);
13675         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13676                         cache_resource->idx);
13677         DRV_LOG(DEBUG, "sample resource %p: removed",
13678                 (void *)cache_resource);
13679 }
13680
13681 /**
13682  * Release a sample resource.
13683  *
13684  * @param dev
13685  *   Pointer to Ethernet device.
13686  * @param handle
13687  *   Pointer to mlx5_flow_handle.
13688  *
13689  * @return
13690  *   1 while a reference on it exists, 0 when freed.
13691  */
13692 static int
13693 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
13694                                      struct mlx5_flow_handle *handle)
13695 {
13696         struct mlx5_priv *priv = dev->data->dev_private;
13697         struct mlx5_flow_dv_sample_resource *cache_resource;
13698
13699         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13700                          handle->dvh.rix_sample);
13701         if (!cache_resource)
13702                 return 0;
13703         MLX5_ASSERT(cache_resource->verbs_action);
13704         return mlx5_cache_unregister(&priv->sh->sample_action_list,
13705                                      &cache_resource->entry);
13706 }
13707
13708 void
13709 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
13710                              struct mlx5_cache_entry *entry)
13711 {
13712         struct mlx5_flow_dv_dest_array_resource *cache_resource =
13713                         container_of(entry, typeof(*cache_resource), entry);
13714         struct rte_eth_dev *dev = cache_resource->dev;
13715         struct mlx5_priv *priv = dev->data->dev_private;
13716         uint32_t i = 0;
13717
13718         MLX5_ASSERT(cache_resource->action);
13719         if (cache_resource->action)
13720                 claim_zero(mlx5_flow_os_destroy_flow_action
13721                                         (cache_resource->action));
13722         for (; i < cache_resource->num_of_dest; i++)
13723                 flow_dv_sample_sub_actions_release(dev,
13724                                 &cache_resource->sample_idx[i]);
13725         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13726                         cache_resource->idx);
13727         DRV_LOG(DEBUG, "destination array resource %p: removed",
13728                 (void *)cache_resource);
13729 }
13730
13731 /**
13732  * Release a destination array resource.
13733  *
13734  * @param dev
13735  *   Pointer to Ethernet device.
13736  * @param handle
13737  *   Pointer to mlx5_flow_handle.
13738  *
13739  * @return
13740  *   1 while a reference on it exists, 0 when freed.
13741  */
13742 static int
13743 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
13744                                     struct mlx5_flow_handle *handle)
13745 {
13746         struct mlx5_priv *priv = dev->data->dev_private;
13747         struct mlx5_flow_dv_dest_array_resource *cache;
13748
13749         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
13750                                handle->dvh.rix_dest_array);
13751         if (!cache)
13752                 return 0;
13753         MLX5_ASSERT(cache->action);
13754         return mlx5_cache_unregister(&priv->sh->dest_array_list,
13755                                      &cache->entry);
13756 }
13757
13758 static void
13759 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
13760 {
13761         struct mlx5_priv *priv = dev->data->dev_private;
13762         struct mlx5_dev_ctx_shared *sh = priv->sh;
13763         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
13764                                 sh->geneve_tlv_option_resource;
13765         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
13766         if (geneve_opt_resource) {
13767                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
13768                                          __ATOMIC_RELAXED))) {
13769                         claim_zero(mlx5_devx_cmd_destroy
13770                                         (geneve_opt_resource->obj));
13771                         mlx5_free(sh->geneve_tlv_option_resource);
13772                         sh->geneve_tlv_option_resource = NULL;
13773                 }
13774         }
13775         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
13776 }
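
/*
 * The GENEVE TLV option object is a per-device singleton shared by every
 * flow matching on GENEVE options, hence the plain spinlock plus
 * reference count here instead of the cache-list machinery used by the
 * other resource types.
 */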
13777
13778 /**
13779  * Remove the flow from the NIC but keep it in memory.
13780  * Lock free (the mutex should be acquired by the caller).
13781  *
13782  * @param[in] dev
13783  *   Pointer to Ethernet device.
13784  * @param[in, out] flow
13785  *   Pointer to flow structure.
13786  */
13787 static void
13788 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
13789 {
13790         struct mlx5_flow_handle *dh;
13791         uint32_t handle_idx;
13792         struct mlx5_priv *priv = dev->data->dev_private;
13793
13794         if (!flow)
13795                 return;
13796         handle_idx = flow->dev_handles;
13797         while (handle_idx) {
13798                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13799                                     handle_idx);
13800                 if (!dh)
13801                         return;
13802                 if (dh->drv_flow) {
13803                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
13804                         dh->drv_flow = NULL;
13805                 }
13806                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
13807                         flow_dv_fate_resource_release(dev, dh);
13808                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13809                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13810                 handle_idx = dh->next.next;
13811         }
13812 }
13813
13814 /**
13815  * Remove the flow from the NIC and the memory.
13816  * Lock free (the mutex should be acquired by the caller).
13817  *
13818  * @param[in] dev
13819  *   Pointer to the Ethernet device structure.
13820  * @param[in, out] flow
13821  *   Pointer to flow structure.
13822  */
13823 static void
13824 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
13825 {
13826         struct mlx5_flow_handle *dev_handle;
13827         struct mlx5_priv *priv = dev->data->dev_private;
13828         struct mlx5_flow_meter_info *fm = NULL;
13829         uint32_t srss = 0;
13830
13831         if (!flow)
13832                 return;
13833         flow_dv_remove(dev, flow);
13834         if (flow->counter) {
13835                 flow_dv_counter_free(dev, flow->counter);
13836                 flow->counter = 0;
13837         }
13838         if (flow->meter) {
13839                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
13840                 if (fm)
13841                         mlx5_flow_meter_detach(priv, fm);
13842                 flow->meter = 0;
13843         }
13844         /* Keep the current age handling by default. */
13845         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
13846                 flow_dv_aso_ct_release(dev, flow->ct);
13847         else if (flow->age)
13848                 flow_dv_aso_age_release(dev, flow->age);
13849         if (flow->geneve_tlv_option) {
13850                 flow_dv_geneve_tlv_option_resource_release(dev);
13851                 flow->geneve_tlv_option = 0;
13852         }
13853         while (flow->dev_handles) {
13854                 uint32_t tmp_idx = flow->dev_handles;
13855
13856                 dev_handle = mlx5_ipool_get(priv->sh->ipool
13857                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
13858                 if (!dev_handle)
13859                         return;
13860                 flow->dev_handles = dev_handle->next.next;
13861                 if (dev_handle->dvh.matcher)
13862                         flow_dv_matcher_release(dev, dev_handle);
13863                 if (dev_handle->dvh.rix_sample)
13864                         flow_dv_sample_resource_release(dev, dev_handle);
13865                 if (dev_handle->dvh.rix_dest_array)
13866                         flow_dv_dest_array_resource_release(dev, dev_handle);
13867                 if (dev_handle->dvh.rix_encap_decap)
13868                         flow_dv_encap_decap_resource_release(dev,
13869                                 dev_handle->dvh.rix_encap_decap);
13870                 if (dev_handle->dvh.modify_hdr)
13871                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
13872                 if (dev_handle->dvh.rix_push_vlan)
13873                         flow_dv_push_vlan_action_resource_release(dev,
13874                                                                   dev_handle);
13875                 if (dev_handle->dvh.rix_tag)
13876                         flow_dv_tag_release(dev,
13877                                             dev_handle->dvh.rix_tag);
13878                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
13879                         flow_dv_fate_resource_release(dev, dev_handle);
13880                 else if (!srss)
13881                         srss = dev_handle->rix_srss;
13882                 if (fm && dev_handle->is_meter_flow_id &&
13883                     dev_handle->split_flow_id)
13884                         mlx5_ipool_free(fm->flow_ipool,
13885                                         dev_handle->split_flow_id);
13886                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
13887                            tmp_idx);
13888         }
13889         if (srss)
13890                 flow_dv_shared_rss_action_release(dev, srss);
13891 }
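
/*
 * Teardown order above matters: the hardware rule is removed first via
 * flow_dv_remove(), the per-handle resources follow, and the shared RSS
 * reference is dropped last because dev_handle->rix_srss is read while
 * the handles are still being walked and freed.
 */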
13892
13893 /**
13894  * Release array of hash RX queue objects.
13895  * Helper function.
13896  *
13897  * @param[in] dev
13898  *   Pointer to the Ethernet device structure.
13899  * @param[in, out] hrxqs
13900  *   Array of hash RX queue objects.
13901  *
13902  * @return
13903  *   Total number of references to hash RX queue objects in *hrxqs* array
13904  *   after this operation.
13905  */
13906 static int
13907 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
13908                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
13909 {
13910         size_t i;
13911         int remaining = 0;
13912
13913         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
13914                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
13915
13916                 if (!ret)
13917                         (*hrxqs)[i] = 0;
13918                 remaining += ret;
13919         }
13920         return remaining;
13921 }
13922
13923 /**
13924  * Release all hash RX queue objects representing shared RSS action.
13925  *
13926  * @param[in] dev
13927  *   Pointer to the Ethernet device structure.
13928  * @param[in, out] action
13929  *   Shared RSS action to remove hash RX queue objects from.
13930  *
13931  * @return
13932  *   Total number of references to hash RX queue objects stored in *action*
13933  *   after this operation.
13934  *   Expected to be 0 if no external references are held.
13935  */
13936 static int
13937 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
13938                                  struct mlx5_shared_action_rss *shared_rss)
13939 {
13940         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
13941 }
13942
13943 /**
13944  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
13945  * user input.
13946  *
13947  * Only one hash value is available for each L3+L4 combination.
13948  * For example, MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
13949  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
13950  * the same slot in mlx5_rss_hash_fields.
13952  *
13953  * @param[in] rss
13954  *   Pointer to the shared action RSS conf.
13955  * @param[in, out] hash_field
13956  *   hash_field variable that needs to be adjusted.
13957  *
13958  * @return
13959  *   void
13960  */
13961 static void
13962 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
13963                                      uint64_t *hash_field)
13964 {
13965         uint64_t rss_types = rss->origin.types;
13966
13967         switch (*hash_field & ~IBV_RX_HASH_INNER) {
13968         case MLX5_RSS_HASH_IPV4:
13969                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
13970                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
13971                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13972                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
13973                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13974                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
13975                         else
13976                                 *hash_field |= MLX5_RSS_HASH_IPV4;
13977                 }
13978                 return;
13979         case MLX5_RSS_HASH_IPV6:
13980                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
13981                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
13982                         if (rss_types & ETH_RSS_L3_DST_ONLY)
13983                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
13984                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
13985                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
13986                         else
13987                                 *hash_field |= MLX5_RSS_HASH_IPV6;
13988                 }
13989                 return;
13990         case MLX5_RSS_HASH_IPV4_UDP:
13991                 /* fall-through. */
13992         case MLX5_RSS_HASH_IPV6_UDP:
13993                 if (rss_types & ETH_RSS_UDP) {
13994                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
13995                         if (rss_types & ETH_RSS_L4_DST_ONLY)
13996                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
13997                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
13998                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
13999                         else
14000                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14001                 }
14002                 return;
14003         case MLX5_RSS_HASH_IPV4_TCP:
14004                 /* fall-through. */
14005         case MLX5_RSS_HASH_IPV6_TCP:
14006                 if (rss_types & ETH_RSS_TCP) {
14007                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14008                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14009                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14010                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14011                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14012                         else
14013                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14014                 }
14015                 return;
14016         default:
14017                 return;
14018         }
14019 }
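
/*
 * Worked example (hypothetical configuration): with
 * rss->origin.types == (ETH_RSS_IPV4 | ETH_RSS_L3_DST_ONLY) and
 * *hash_field == MLX5_RSS_HASH_IPV4, the IPv4 case above clears the
 * generic IPv4 bits and sets IBV_RX_HASH_DST_IPV4 only, so the
 * pre-created hash RX queue hashes on the destination address alone.
 */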
14020
14021 /**
14022  * Setup shared RSS action.
14023  * Prepare set of hash RX queue objects sufficient to handle all valid
14024  * hash_fields combinations (see enum ibv_rx_hash_fields).
14025  *
14026  * @param[in] dev
14027  *   Pointer to the Ethernet device structure.
14028  * @param[in] action_idx
14029  *   Shared RSS action ipool index.
14030  * @param[in, out] action
14031  *   Partially initialized shared RSS action.
14032  * @param[out] error
14033  *   Perform verbose error reporting if not NULL. Initialized in case of
14034  *   error only.
14035  *
14036  * @return
14037  *   0 on success, otherwise negative errno value.
14038  */
14039 static int
14040 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14041                            uint32_t action_idx,
14042                            struct mlx5_shared_action_rss *shared_rss,
14043                            struct rte_flow_error *error)
14044 {
14045         struct mlx5_flow_rss_desc rss_desc = { 0 };
14046         size_t i;
14047         int err;
14048
14049         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14050                 return rte_flow_error_set(error, rte_errno,
14051                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14052                                           "cannot setup indirection table");
14053         }
14054         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14055         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14056         rss_desc.const_q = shared_rss->origin.queue;
14057         rss_desc.queue_num = shared_rss->origin.queue_num;
14058         /* Set non-zero value to indicate a shared RSS. */
14059         rss_desc.shared_rss = action_idx;
14060         rss_desc.ind_tbl = shared_rss->ind_tbl;
14061         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14062                 uint32_t hrxq_idx;
14063                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14064                 int tunnel = 0;
14065
14066                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14067                 if (shared_rss->origin.level > 1) {
14068                         hash_fields |= IBV_RX_HASH_INNER;
14069                         tunnel = 1;
14070                 }
14071                 rss_desc.tunnel = tunnel;
14072                 rss_desc.hash_fields = hash_fields;
14073                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14074                 if (!hrxq_idx) {
14075                         rte_flow_error_set
14076                                 (error, rte_errno,
14077                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14078                                  "cannot get hash queue");
14079                         goto error_hrxq_new;
14080                 }
14081                 err = __flow_dv_action_rss_hrxq_set
14082                         (shared_rss, hash_fields, hrxq_idx);
14083                 MLX5_ASSERT(!err);
14084         }
14085         return 0;
14086 error_hrxq_new:
14087         err = rte_errno;
14088         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14089         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14090                 shared_rss->ind_tbl = NULL;
14091         rte_errno = err;
14092         return -rte_errno;
14093 }
14094
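/*
 * The setup loop above pre-creates one hash RX queue object per entry of
 * mlx5_rss_hash_fields[], so the flow-apply path can map an adjusted
 * hash_fields value straight to a ready hrxq. A lookup sketch, assuming
 * the __flow_dv_action_rss_hrxq_lookup() helper defined earlier in this
 * file and the shared action's ipool index in idx:
 *
 *   uint64_t hf = MLX5_RSS_HASH_IPV4_TCP;
 *   __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hf);
 *   uint32_t hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev, idx, hf);
 */
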
14095 /**
14096  * Create shared RSS action.
14097  *
14098  * @param[in] dev
14099  *   Pointer to the Ethernet device structure.
14100  * @param[in] conf
14101  *   Shared action configuration.
14102  * @param[in] rss
14103  *   RSS action specification used to create shared action.
14104  * @param[out] error
14105  *   Perform verbose error reporting if not NULL. Initialized in case of
14106  *   error only.
14107  *
14108  * @return
14109  *   A valid shared action ID in case of success, 0 otherwise and
14110  *   rte_errno is set.
14111  */
14112 static uint32_t
14113 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14114                             const struct rte_flow_indir_action_conf *conf,
14115                             const struct rte_flow_action_rss *rss,
14116                             struct rte_flow_error *error)
14117 {
14118         struct mlx5_priv *priv = dev->data->dev_private;
14119         struct mlx5_shared_action_rss *shared_rss = NULL;
14120         void *queue = NULL;
14121         struct rte_flow_action_rss *origin;
14122         const uint8_t *rss_key;
14123         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14124         uint32_t idx;
14125
14126         RTE_SET_USED(conf);
14127         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14128                             0, SOCKET_ID_ANY);
14129         shared_rss = mlx5_ipool_zmalloc
14130                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14131         if (!shared_rss || !queue) {
14132                 rte_flow_error_set(error, ENOMEM,
14133                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14134                                    "cannot allocate resource memory");
14135                 goto error_rss_init;
14136         }
14137         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14138                 rte_flow_error_set(error, E2BIG,
14139                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14140                                    "rss action number out of range");
14141                 goto error_rss_init;
14142         }
14143         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14144                                           sizeof(*shared_rss->ind_tbl),
14145                                           0, SOCKET_ID_ANY);
14146         if (!shared_rss->ind_tbl) {
14147                 rte_flow_error_set(error, ENOMEM,
14148                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14149                                    "cannot allocate resource memory");
14150                 goto error_rss_init;
14151         }
14152         memcpy(queue, rss->queue, queue_size);
14153         shared_rss->ind_tbl->queues = queue;
14154         shared_rss->ind_tbl->queues_n = rss->queue_num;
14155         origin = &shared_rss->origin;
14156         origin->func = rss->func;
14157         origin->level = rss->level;
14158         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14159         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14160         /* NULL RSS key indicates default RSS key. */
14161         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14162         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14163         origin->key = &shared_rss->key[0];
14164         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14165         origin->queue = queue;
14166         origin->queue_num = rss->queue_num;
14167         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14168                 goto error_rss_init;
14169         rte_spinlock_init(&shared_rss->action_rss_sl);
14170         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14171         rte_spinlock_lock(&priv->shared_act_sl);
14172         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14173                      &priv->rss_shared_actions, idx, shared_rss, next);
14174         rte_spinlock_unlock(&priv->shared_act_sl);
14175         return idx;
14176 error_rss_init:
14177         if (shared_rss) {
14178                 if (shared_rss->ind_tbl)
14179                         mlx5_free(shared_rss->ind_tbl);
14180                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14181                                 idx);
14182         }
14183         if (queue)
14184                 mlx5_free(queue);
14185         return 0;
14186 }
14187
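/*
 * A hedged application-side sketch of reaching the creation path above
 * through the generic rte_flow API; port_id is assumed to be a configured
 * mlx5 port and the queue numbers are illustrative:
 *
 *   uint16_t queues[] = { 0, 1, 2, 3 };
 *   struct rte_flow_action_rss rss_conf = {
 *           .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *           .level = 1,
 *           .types = ETH_RSS_IP,
 *           .key = NULL,        \* NULL key selects the default key. *\
 *           .key_len = 0,
 *           .queue = queues,
 *           .queue_num = RTE_DIM(queues),
 *   };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_RSS,
 *           .conf = &rss_conf,
 *   };
 *   struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *   struct rte_flow_error error;
 *   struct rte_flow_action_handle *handle =
 *           rte_flow_action_handle_create(port_id, &conf, &action, &error);
 */
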
14188 /**
14189  * Destroy the shared RSS action.
14190  * Release related hash RX queue objects.
14191  *
14192  * @param[in] dev
14193  *   Pointer to the Ethernet device structure.
14194  * @param[in] idx
14195  *   The shared RSS action object ID to be removed.
14196  * @param[out] error
14197  *   Perform verbose error reporting if not NULL. Initialized in case of
14198  *   error only.
14199  *
14200  * @return
14201  *   0 on success, otherwise negative errno value.
14202  */
14203 static int
14204 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14205                              struct rte_flow_error *error)
14206 {
14207         struct mlx5_priv *priv = dev->data->dev_private;
14208         struct mlx5_shared_action_rss *shared_rss =
14209             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14210         uint32_t old_refcnt = 1;
14211         int remaining;
14212         uint16_t *queue = NULL;
14213
14214         if (!shared_rss)
14215                 return rte_flow_error_set(error, EINVAL,
14216                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14217                                           "invalid shared action");
14218         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14219         if (remaining)
14220                 return rte_flow_error_set(error, EBUSY,
14221                                           RTE_FLOW_ERROR_TYPE_ACTION,
14222                                           NULL,
14223                                           "shared rss hrxq has references");
14224         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14225                                          0, 0, __ATOMIC_ACQUIRE,
14226                                          __ATOMIC_RELAXED))
14227                 return rte_flow_error_set(error, EBUSY,
14228                                           RTE_FLOW_ERROR_TYPE_ACTION,
14229                                           NULL,
14230                                           "shared rss has references");
14231         queue = shared_rss->ind_tbl->queues;
14232         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14233         if (remaining)
14234                 return rte_flow_error_set(error, EBUSY,
14235                                           RTE_FLOW_ERROR_TYPE_ACTION,
14236                                           NULL,
14237                                           "shared rss indirection table has"
14238                                           " references");
14239         mlx5_free(queue);
14240         rte_spinlock_lock(&priv->shared_act_sl);
14241         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14242                      &priv->rss_shared_actions, idx, shared_rss, next);
14243         rte_spinlock_unlock(&priv->shared_act_sl);
14244         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14245                         idx);
14246         return 0;
14247 }
14248
14249 /**
14250  * Create an indirect action, lock free
14251  * (the mutex should be acquired by the caller).
14252  * Dispatches to the action type specific handler.
14253  *
14254  * @param[in] dev
14255  *   Pointer to the Ethernet device structure.
14256  * @param[in] conf
14257  *   Shared action configuration.
14258  * @param[in] action
14259  *   Action specification used to create indirect action.
14260  * @param[out] error
14261  *   Perform verbose error reporting if not NULL. Initialized in case of
14262  *   error only.
14263  *
14264  * @return
14265  *   A valid shared action handle in case of success, NULL otherwise and
14266  *   rte_errno is set.
14267  */
14268 static struct rte_flow_action_handle *
14269 flow_dv_action_create(struct rte_eth_dev *dev,
14270                       const struct rte_flow_indir_action_conf *conf,
14271                       const struct rte_flow_action *action,
14272                       struct rte_flow_error *err)
14273 {
14274         struct mlx5_priv *priv = dev->data->dev_private;
14275         uint32_t age_idx = 0;
14276         uint32_t idx = 0;
14277         uint32_t ret = 0;
14278
14279         switch (action->type) {
14280         case RTE_FLOW_ACTION_TYPE_RSS:
14281                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14282                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14283                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14284                 break;
14285         case RTE_FLOW_ACTION_TYPE_AGE:
14286                 age_idx = flow_dv_aso_age_alloc(dev, err);
14287                 if (!age_idx) {
14288                         ret = -rte_errno;
14289                         break;
14290                 }
14291                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14292                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14293                 flow_dv_aso_age_params_init(dev, age_idx,
14294                                         ((const struct rte_flow_action_age *)
14295                                                 action->conf)->context ?
14296                                         ((const struct rte_flow_action_age *)
14297                                                 action->conf)->context :
14298                                         (void *)(uintptr_t)idx,
14299                                         ((const struct rte_flow_action_age *)
14300                                                 action->conf)->timeout);
14301                 ret = age_idx;
14302                 break;
14303         case RTE_FLOW_ACTION_TYPE_COUNT:
14304                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14305                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14306                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14307                 break;
14308         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14309                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14310                                                          err);
14311                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14312                 break;
14313         default:
14314                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14315                                    NULL, "action type not supported");
14316                 break;
14317         }
14318         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14319 }
14320
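/*
 * The handle returned above is not a pointer but a packed 32-bit value:
 * the action type occupies the bits at and above
 * MLX5_INDIRECT_ACTION_TYPE_OFFSET, the per-type object index the bits
 * below it. A decoding sketch, mirroring flow_dv_action_destroy() below:
 *
 *   uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *   uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *   uint32_t idx = act_idx &
 *                  ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 */
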
14321 /**
14322  * Destroy the indirect action.
14323  * Release the action-related resources on the NIC and in memory.
14324  * Lock free (the mutex should be acquired by the caller).
14325  * Dispatches to the action type specific handler.
14326  *
14327  * @param[in] dev
14328  *   Pointer to the Ethernet device structure.
14329  * @param[in] handle
14330  *   The indirect action object handle to be removed.
14331  * @param[out] error
14332  *   Perform verbose error reporting if not NULL. Initialized in case of
14333  *   error only.
14334  *
14335  * @return
14336  *   0 on success, otherwise negative errno value.
14337  */
14338 static int
14339 flow_dv_action_destroy(struct rte_eth_dev *dev,
14340                        struct rte_flow_action_handle *handle,
14341                        struct rte_flow_error *error)
14342 {
14343         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14344         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14345         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14346         struct mlx5_flow_counter *cnt;
14347         uint32_t no_flow_refcnt = 1;
14348         int ret;
14349
14350         switch (type) {
14351         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14352                 return __flow_dv_action_rss_release(dev, idx, error);
14353         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14354                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14355                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14356                                                  &no_flow_refcnt, 1, false,
14357                                                  __ATOMIC_ACQUIRE,
14358                                                  __ATOMIC_RELAXED))
14359                         return rte_flow_error_set(error, EBUSY,
14360                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14361                                                   NULL,
14362                                                   "Indirect count action has references");
14363                 flow_dv_counter_free(dev, idx);
14364                 return 0;
14365         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14366                 ret = flow_dv_aso_age_release(dev, idx);
14367                 if (ret)
14368                         /*
14369                          * In this case, the last flow holding a reference
14370                          * will actually release the age action.
14371                          */
14372                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14373                                 " released with references %d.", idx, ret);
14374                 return 0;
14375         case MLX5_INDIRECT_ACTION_TYPE_CT:
14376                 ret = flow_dv_aso_ct_release(dev, idx);
14377                 if (ret < 0)
14378                         return ret;
14379                 if (ret > 0)
14380                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14381                                 "has references %d.", idx, ret);
14382                 return 0;
14383         default:
14384                 return rte_flow_error_set(error, ENOTSUP,
14385                                           RTE_FLOW_ERROR_TYPE_ACTION,
14386                                           NULL,
14387                                           "action type not supported");
14388         }
14389 }
14390
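/*
 * Application-side sketch for the destroy path above, assuming a handle
 * obtained via rte_flow_action_handle_create(); destruction is refused
 * with EBUSY while flow rules still reference the action:
 *
 *   struct rte_flow_error error;
 *   int rc = rte_flow_action_handle_destroy(port_id, handle, &error);
 *   if (rc != 0 && rte_errno == EBUSY)
 *           printf("indirect action is still referenced\n");
 */
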
14391 /**
14392  * Update the shared RSS action configuration in place.
14393  *
14394  * @param[in] dev
14395  *   Pointer to the Ethernet device structure.
14396  * @param[in] idx
14397  *   The shared RSS action object ID to be updated.
14398  * @param[in] action_conf
14399  *   RSS action specification used to modify *shared_rss*.
14400  * @param[out] error
14401  *   Perform verbose error reporting if not NULL. Initialized in case of
14402  *   error only.
14403  *
14404  * @return
14405  *   0 on success, otherwise negative errno value.
14406  * @note Currently only update of the RSS queues is supported.
14407  */
14408 static int
14409 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14410                             const struct rte_flow_action_rss *action_conf,
14411                             struct rte_flow_error *error)
14412 {
14413         struct mlx5_priv *priv = dev->data->dev_private;
14414         struct mlx5_shared_action_rss *shared_rss =
14415             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14416         int ret = 0;
14417         void *queue = NULL;
14418         uint16_t *queue_old = NULL;
14419         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
14420
14421         if (!shared_rss)
14422                 return rte_flow_error_set(error, EINVAL,
14423                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14424                                           "invalid shared action to update");
14425         if (priv->obj_ops.ind_table_modify == NULL)
14426                 return rte_flow_error_set(error, ENOTSUP,
14427                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14428                                           "cannot modify indirection table");
14429         queue = mlx5_malloc(MLX5_MEM_ZERO,
14430                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14431                             0, SOCKET_ID_ANY);
14432         if (!queue)
14433                 return rte_flow_error_set(error, ENOMEM,
14434                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14435                                           NULL,
14436                                           "cannot allocate resource memory");
14437         memcpy(queue, action_conf->queue, queue_size);
14438         MLX5_ASSERT(shared_rss->ind_tbl);
14439         rte_spinlock_lock(&shared_rss->action_rss_sl);
14440         queue_old = shared_rss->ind_tbl->queues;
14441         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
14442                                         queue, action_conf->queue_num, true);
14443         if (ret) {
14444                 mlx5_free(queue);
14445                 ret = rte_flow_error_set(error, rte_errno,
14446                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14447                                           "cannot update indirection table");
14448         } else {
14449                 mlx5_free(queue_old);
14450                 shared_rss->origin.queue = queue;
14451                 shared_rss->origin.queue_num = action_conf->queue_num;
14452         }
14453         rte_spinlock_unlock(&shared_rss->action_rss_sl);
14454         return ret;
14455 }
14456
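/*
 * Application-side sketch of a queue-set update, the only update
 * currently supported (see the note above); the queue numbers are
 * illustrative. The payload is an RSS action, but only its queue array
 * and queue_num are consumed by this driver's update path:
 *
 *   uint16_t new_queues[] = { 4, 5 };
 *   struct rte_flow_action_rss rss_update = {
 *           .queue = new_queues,
 *           .queue_num = RTE_DIM(new_queues),
 *   };
 *   struct rte_flow_action update = {
 *           .type = RTE_FLOW_ACTION_TYPE_RSS,
 *           .conf = &rss_update,
 *   };
 *   rte_flow_action_handle_update(port_id, handle, &update, &error);
 */
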
14457 /**
14458  * Update the conntrack context or direction in place.
14459  * Context update should be synchronized.
14460  *
14461  * @param[in] dev
14462  *   Pointer to the Ethernet device structure.
14463  * @param[in] idx
14464  *   The conntrack object ID to be updated.
14465  * @param[in] update
14466  *   Pointer to the structure of information to update.
14467  * @param[out] error
14468  *   Perform verbose error reporting if not NULL. Initialized in case of
14469  *   error only.
14470  *
14471  * @return
14472  *   0 on success, otherwise negative errno value.
14473  */
14474 static int
14475 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14476                            const struct rte_flow_modify_conntrack *update,
14477                            struct rte_flow_error *error)
14478 {
14479         struct mlx5_priv *priv = dev->data->dev_private;
14480         struct mlx5_aso_ct_action *ct;
14481         const struct rte_flow_action_conntrack *new_prf;
14482         int ret = 0;
14483         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14484         uint32_t dev_idx;
14485
14486         if (PORT_ID(priv) != owner)
14487                 return rte_flow_error_set(error, EACCES,
14488                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14489                                           NULL,
14490                                           "CT object owned by another port");
14491         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14492         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14493         if (!ct->refcnt)
14494                 return rte_flow_error_set(error, ENOMEM,
14495                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14496                                           NULL,
14497                                           "CT object is inactive");
14498         new_prf = &update->new_ct;
14499         if (update->direction)
14500                 ct->is_original = !!new_prf->is_original_dir;
14501         if (update->state) {
14502                 /* Only validate the profile when it needs to be updated. */
14503                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14504                 if (ret)
14505                         return ret;
14506                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14507                 if (ret)
14508                         return rte_flow_error_set(error, EIO,
14509                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14510                                         NULL,
14511                                         "Failed to send CT context update WQE");
14512                 /* Block until ready or a failure. */
14513                 ret = mlx5_aso_ct_available(priv->sh, ct);
14514                 if (ret)
14515                         rte_flow_error_set(error, rte_errno,
14516                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14517                                            NULL,
14518                                            "Timed out waiting for the CT update");
14519         }
14520         return ret;
14521 }
14522
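/*
 * Sketch of an update request that flips only the direction and leaves
 * the CT state machine untouched; with .state == 0 the WQE-based profile
 * update and the wait for completion above are both skipped:
 *
 *   struct rte_flow_modify_conntrack mod = {
 *           .new_ct = { .is_original_dir = 0 },
 *           .direction = 1,
 *           .state = 0,
 *   };
 *   rte_flow_action_handle_update(port_id, handle, &mod, &error);
 */
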
14523 /**
14524  * Update the indirect action configuration in place, lock free
14525  * (the mutex should be acquired by the caller).
14526  *
14527  * @param[in] dev
14528  *   Pointer to the Ethernet device structure.
14529  * @param[in] handle
14530  *   The indirect action object handle to be updated.
14531  * @param[in] update
14532  *   Action specification used to modify the action pointed by *handle*.
14533  *   *update* could be of the same type as the action pointed to by the
14534  *   *handle* argument, or some other structure like a wrapper, depending on
14535  *   the indirect action type.
14536  * @param[out] error
14537  *   Perform verbose error reporting if not NULL. Initialized in case of
14538  *   error only.
14539  *
14540  * @return
14541  *   0 on success, otherwise negative errno value.
14542  */
14543 static int
14544 flow_dv_action_update(struct rte_eth_dev *dev,
14545                         struct rte_flow_action_handle *handle,
14546                         const void *update,
14547                         struct rte_flow_error *err)
14548 {
14549         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14550         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14551         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14552         const void *action_conf;
14553
14554         switch (type) {
14555         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14556                 action_conf = ((const struct rte_flow_action *)update)->conf;
14557                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14558         case MLX5_INDIRECT_ACTION_TYPE_CT:
14559                 return __flow_dv_action_ct_update(dev, idx, update, err);
14560         default:
14561                 return rte_flow_error_set(err, ENOTSUP,
14562                                           RTE_FLOW_ERROR_TYPE_ACTION,
14563                                           NULL,
14564                                           "action type update not supported");
14565         }
14566 }
14567
14568 /**
14569  * Destroy the meter sub policy table rules.
14570  * Lock free (the mutex should be acquired by the caller).
14571  *
14572  * @param[in] dev
14573  *   Pointer to Ethernet device.
14574  * @param[in] sub_policy
14575  *   Pointer to meter sub policy table.
14576  */
14577 static void
14578 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
14579                              struct mlx5_flow_meter_sub_policy *sub_policy)
14580 {
14581         struct mlx5_flow_tbl_data_entry *tbl;
14582         int i;
14583
14584         for (i = 0; i < RTE_COLORS; i++) {
14585                 if (sub_policy->color_rule[i]) {
14586                         claim_zero(mlx5_flow_os_destroy_flow
14587                                 (sub_policy->color_rule[i]));
14588                         sub_policy->color_rule[i] = NULL;
14589                 }
14590                 if (sub_policy->color_matcher[i]) {
14591                         tbl = container_of(sub_policy->color_matcher[i]->tbl,
14592                                 typeof(*tbl), tbl);
14593                         mlx5_cache_unregister(&tbl->matchers,
14594                                       &sub_policy->color_matcher[i]->entry);
14595                         sub_policy->color_matcher[i] = NULL;
14596                 }
14597         }
14598         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14599                 if (sub_policy->rix_hrxq[i]) {
14600                         mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
14601                         sub_policy->rix_hrxq[i] = 0;
14602                 }
14603                 if (sub_policy->jump_tbl[i]) {
14604                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14605                         sub_policy->jump_tbl[i]);
14606                         sub_policy->jump_tbl[i] = NULL;
14607                 }
14608         }
14609         if (sub_policy->tbl_rsc) {
14610                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14611                         sub_policy->tbl_rsc);
14612                 sub_policy->tbl_rsc = NULL;
14613         }
14614 }
14615
14616 /**
14617  * Destroy policy rules, lock free
14618  * (the mutex should be acquired by the caller).
14619  * Iterates over the sub-policy tables of all domains.
14620  *
14621  * @param[in] dev
14622  *   Pointer to the Ethernet device structure.
14623  * @param[in] mtr_policy
14624  *   Meter policy struct.
14625  */
14626 static void
14627 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
14628                       struct mlx5_flow_meter_policy *mtr_policy)
14629 {
14630         uint32_t i, j;
14631         struct mlx5_flow_meter_sub_policy *sub_policy;
14632         uint16_t sub_policy_num;
14633
14634         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14635                 sub_policy_num = (mtr_policy->sub_policy_num >>
14636                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14637                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14638                 for (j = 0; j < sub_policy_num; j++) {
14639                         sub_policy = mtr_policy->sub_policys[i][j];
14640                         if (sub_policy)
14641                                 __flow_dv_destroy_sub_policy_rules
14642                                                 (dev, sub_policy);
14643                 }
14644         }
14645 }
14646
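/*
 * The per-domain sub-policy counts are packed into the single
 * mtr_policy->sub_policy_num word, MLX5_MTR_SUB_POLICY_NUM_SHIFT bits per
 * domain, as the loop above unpacks. Extraction sketch for the ingress
 * domain:
 *
 *   uint16_t n = (mtr_policy->sub_policy_num >>
 *                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT *
 *                  MLX5_MTR_DOMAIN_INGRESS)) &
 *                MLX5_MTR_SUB_POLICY_NUM_MASK;
 */
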
14647 /**
14648  * Destroy the policy actions, lock free
14649  * (the mutex should be acquired by the caller).
14650  * Releases the per-color fate action resources.
14651  *
14652  * @param[in] dev
14653  *   Pointer to the Ethernet device structure.
14654  * @param[in] mtr_policy
14655  *   Meter policy struct.
14656  */
14657 static void
14658 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
14659                       struct mlx5_flow_meter_policy *mtr_policy)
14660 {
14661         struct rte_flow_action *rss_action;
14662         struct mlx5_flow_handle dev_handle;
14663         uint32_t i, j;
14664
14665         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14666                 if (mtr_policy->act_cnt[i].rix_mark) {
14667                         flow_dv_tag_release(dev,
14668                                 mtr_policy->act_cnt[i].rix_mark);
14669                         mtr_policy->act_cnt[i].rix_mark = 0;
14670                 }
14671                 if (mtr_policy->act_cnt[i].modify_hdr) {
14672                         dev_handle.dvh.modify_hdr =
14673                                 mtr_policy->act_cnt[i].modify_hdr;
14674                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
14675                 }
14676                 switch (mtr_policy->act_cnt[i].fate_action) {
14677                 case MLX5_FLOW_FATE_SHARED_RSS:
14678                         rss_action = mtr_policy->act_cnt[i].rss;
14679                         mlx5_free(rss_action);
14680                         break;
14681                 case MLX5_FLOW_FATE_PORT_ID:
14682                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
14683                                 flow_dv_port_id_action_resource_release(dev,
14684                                 mtr_policy->act_cnt[i].rix_port_id_action);
14685                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
14686                         }
14687                         break;
14688                 case MLX5_FLOW_FATE_DROP:
14689                 case MLX5_FLOW_FATE_JUMP:
14690                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14691                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
14692                                                 NULL;
14693                         break;
14694                 default:
14695                         /* Queue action: nothing to do. */
14696                         break;
14697                 }
14698         }
14699         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14700                 mtr_policy->dr_drop_action[j] = NULL;
14701 }
14702
14703 /**
14704  * Create the policy actions for one domain, lock free
14705  * (the mutex should be acquired by the caller).
14706  * Dispatches to the action type specific handler.
14707  *
14708  * @param[in] dev
14709  *   Pointer to the Ethernet device structure.
14710  * @param[in] mtr_policy
14711  *   Meter policy struct.
14712  * @param[in] actions
14713  *   Per-color action specifications used to create meter actions.
 * @param[in] domain
 *   Meter domain to create the policy actions for.
14714  * @param[out] error
14715  *   Perform verbose error reporting if not NULL. Initialized in case of
14716  *   error only.
14717  *
14718  * @return
14719  *   0 on success, otherwise negative errno value.
14720  */
14721 static int
14722 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
14723                         struct mlx5_flow_meter_policy *mtr_policy,
14724                         const struct rte_flow_action *actions[RTE_COLORS],
14725                         enum mlx5_meter_domain domain,
14726                         struct rte_mtr_error *error)
14727 {
14728         struct mlx5_priv *priv = dev->data->dev_private;
14729         struct rte_flow_error flow_err;
14730         const struct rte_flow_action *act;
14731         uint64_t action_flags = 0;
14732         struct mlx5_flow_handle dh;
14733         struct mlx5_flow dev_flow;
14734         struct mlx5_flow_dv_port_id_action_resource port_id_action;
14735         int i, ret;
14736         uint8_t egress, transfer;
14737         struct mlx5_meter_policy_action_container *act_cnt = NULL;
14738         union {
14739                 struct mlx5_flow_dv_modify_hdr_resource res;
14740                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
14741                             sizeof(struct mlx5_modification_cmd) *
14742                             (MLX5_MAX_MODIFY_NUM + 1)];
14743         } mhdr_dummy;
14744
14745         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
14746         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
14747         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
14748         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
14749         memset(&port_id_action, 0,
14750                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
14751         dev_flow.handle = &dh;
14752         dev_flow.dv.port_id_action = &port_id_action;
14753         dev_flow.external = true;
14754         for (i = 0; i < RTE_COLORS; i++) {
14755                 if (i < MLX5_MTR_RTE_COLORS)
14756                         act_cnt = &mtr_policy->act_cnt[i];
14757                 for (act = actions[i];
14758                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
14759                         act++) {
14760                         switch (act->type) {
14761                         case RTE_FLOW_ACTION_TYPE_MARK:
14762                         {
14763                                 uint32_t tag_be = mlx5_flow_mark_set
14764                                         (((const struct rte_flow_action_mark *)
14765                                         (act->conf))->id);
14766
14767                                 if (i >= MLX5_MTR_RTE_COLORS)
14768                                         return -rte_mtr_error_set(error,
14769                                           ENOTSUP,
14770                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14771                                           NULL,
14772                                           "cannot create policy "
14773                                           "mark action for this color");
14774                                 dev_flow.handle->mark = 1;
14775                                 if (flow_dv_tag_resource_register(dev, tag_be,
14776                                                   &dev_flow, &flow_err))
14777                                         return -rte_mtr_error_set(error,
14778                                         ENOTSUP,
14779                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14780                                         NULL,
14781                                         "cannot setup policy mark action");
14782                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
14783                                 act_cnt->rix_mark =
14784                                         dev_flow.handle->dvh.rix_tag;
14785                                 action_flags |= MLX5_FLOW_ACTION_MARK;
14786                                 break;
14787                         }
14788                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
14789                         {
14790                                 struct mlx5_flow_dv_modify_hdr_resource
14791                                         *mhdr_res = &mhdr_dummy.res;
14792
14793                                 if (i >= MLX5_MTR_RTE_COLORS)
14794                                         return -rte_mtr_error_set(error,
14795                                           ENOTSUP,
14796                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14797                                           NULL,
14798                                           "cannot create policy "
14799                                           "set tag action for this color");
14800                                 memset(mhdr_res, 0, sizeof(*mhdr_res));
14801                                 mhdr_res->ft_type = transfer ?
14802                                         MLX5DV_FLOW_TABLE_TYPE_FDB :
14803                                         egress ?
14804                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
14805                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
14806                                 if (flow_dv_convert_action_set_tag
14807                                 (dev, mhdr_res,
14808                                 (const struct rte_flow_action_set_tag *)
14809                                 act->conf,  &flow_err))
14810                                         return -rte_mtr_error_set(error,
14811                                         ENOTSUP,
14812                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14813                                         NULL, "cannot convert policy "
14814                                         "set tag action");
14815                                 if (!mhdr_res->actions_num)
14816                                         return -rte_mtr_error_set(error,
14817                                         ENOTSUP,
14818                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14819                                         NULL, "cannot find policy "
14820                                         "set tag action");
14821                                 /* create modify action if needed. */
14822                                 dev_flow.dv.group = 1;
14823                                 if (flow_dv_modify_hdr_resource_register
14824                                         (dev, mhdr_res, &dev_flow, &flow_err))
14825                                         return -rte_mtr_error_set(error,
14826                                         ENOTSUP,
14827                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14828                                         NULL, "cannot register policy "
14829                                         "set tag action");
14830                                 act_cnt->modify_hdr =
14831                                 dev_flow.handle->dvh.modify_hdr;
14832                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
14833                                 break;
14834                         }
14835                         case RTE_FLOW_ACTION_TYPE_DROP:
14836                         {
14837                                 struct mlx5_flow_mtr_mng *mtrmng =
14838                                                 priv->sh->mtrmng;
14839                                 struct mlx5_flow_tbl_data_entry *tbl_data;
14840
14841                                 /*
14842                                  * Create the drop table with
14843                                  * METER DROP level.
14844                                  */
14845                                 if (!mtrmng->drop_tbl[domain]) {
14846                                         mtrmng->drop_tbl[domain] =
14847                                         flow_dv_tbl_resource_get(dev,
14848                                         MLX5_FLOW_TABLE_LEVEL_METER,
14849                                         egress, transfer, false, NULL, 0,
14850                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
14851                                         if (!mtrmng->drop_tbl[domain])
14852                                                 return -rte_mtr_error_set
14853                                         (error, ENOTSUP,
14854                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14855                                         NULL,
14856                                         "Failed to create meter drop table");
14857                                 }
14858                                 tbl_data = container_of
14859                                 (mtrmng->drop_tbl[domain],
14860                                 struct mlx5_flow_tbl_data_entry, tbl);
14861                                 if (i < MLX5_MTR_RTE_COLORS) {
14862                                         act_cnt->dr_jump_action[domain] =
14863                                                 tbl_data->jump.action;
14864                                         act_cnt->fate_action =
14865                                                 MLX5_FLOW_FATE_DROP;
14866                                 }
14867                                 if (i == RTE_COLOR_RED)
14868                                         mtr_policy->dr_drop_action[domain] =
14869                                                 tbl_data->jump.action;
14870                                 action_flags |= MLX5_FLOW_ACTION_DROP;
14871                                 break;
14872                         }
14873                         case RTE_FLOW_ACTION_TYPE_QUEUE:
14874                         {
14875                                 if (i >= MLX5_MTR_RTE_COLORS)
14876                                         return -rte_mtr_error_set(error,
14877                                         ENOTSUP,
14878                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14879                                         NULL, "cannot create policy "
14880                                         "fate queue for this color");
14881                                 act_cnt->queue =
14882                                 ((const struct rte_flow_action_queue *)
14883                                         (act->conf))->index;
14884                                 act_cnt->fate_action =
14885                                         MLX5_FLOW_FATE_QUEUE;
14886                                 dev_flow.handle->fate_action =
14887                                         MLX5_FLOW_FATE_QUEUE;
14888                                 mtr_policy->is_queue = 1;
14889                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
14890                                 break;
14891                         }
14892                         case RTE_FLOW_ACTION_TYPE_RSS:
14893                         {
14894                                 int rss_size;
14895
14896                                 if (i >= MLX5_MTR_RTE_COLORS)
14897                                         return -rte_mtr_error_set(error,
14898                                           ENOTSUP,
14899                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14900                                           NULL,
14901                                           "cannot create policy "
14902                                           "rss action for this color");
14903                                 /*
14904                                  * Save RSS conf into policy struct
14905                                  * for translate stage.
14906                                  */
14907                                 rss_size = (int)rte_flow_conv
14908                                         (RTE_FLOW_CONV_OP_ACTION,
14909                                         NULL, 0, act, &flow_err);
14910                                 if (rss_size <= 0)
14911                                         return -rte_mtr_error_set(error,
14912                                           ENOTSUP,
14913                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14914                                           NULL, "Failed to get the "
14915                                           "RSS action struct size");
14916                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
14917                                                 rss_size, 0, SOCKET_ID_ANY);
14918                                 if (!act_cnt->rss)
14919                                         return -rte_mtr_error_set(error,
14920                                           ENOTSUP,
14921                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14922                                           NULL,
14923                                           "Failed to allocate RSS action memory");
14924                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
14925                                         act_cnt->rss, rss_size,
14926                                         act, &flow_err);
14927                                 if (ret < 0)
14928                                         return -rte_mtr_error_set(error,
14929                                           ENOTSUP,
14930                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14931                                           NULL, "Failed to save the "
14932                                           "RSS action into the policy struct");
14933                                 act_cnt->fate_action =
14934                                         MLX5_FLOW_FATE_SHARED_RSS;
14935                                 action_flags |= MLX5_FLOW_ACTION_RSS;
14936                                 break;
14937                         }
14938                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
14939                         {
14940                                 struct mlx5_flow_dv_port_id_action_resource
14941                                         port_id_resource;
14942                                 uint32_t port_id = 0;
14943
14944                                 if (i >= MLX5_MTR_RTE_COLORS)
14945                                         return -rte_mtr_error_set(error,
14946                                         ENOTSUP,
14947                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14948                                         NULL, "cannot create policy "
14949                                         "port action for this color");
14950                                 memset(&port_id_resource, 0,
14951                                         sizeof(port_id_resource));
14952                                 if (flow_dv_translate_action_port_id(dev, act,
14953                                                 &port_id, &flow_err))
14954                                         return -rte_mtr_error_set(error,
14955                                         ENOTSUP,
14956                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14957                                         NULL, "cannot translate "
14958                                         "policy port action");
14959                                 port_id_resource.port_id = port_id;
14960                                 if (flow_dv_port_id_action_resource_register
14961                                         (dev, &port_id_resource,
14962                                         &dev_flow, &flow_err))
14963                                         return -rte_mtr_error_set(error,
14964                                         ENOTSUP,
14965                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
14966                                         NULL, "cannot setup "
14967                                         "policy port action");
14968                                 act_cnt->rix_port_id_action =
14969                                         dev_flow.handle->rix_port_id_action;
14970                                 act_cnt->fate_action =
14971                                         MLX5_FLOW_FATE_PORT_ID;
14972                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
14973                                 break;
14974                         }
14975                         case RTE_FLOW_ACTION_TYPE_JUMP:
14976                         {
14977                                 uint32_t jump_group = 0;
14978                                 uint32_t table = 0;
14979                                 struct mlx5_flow_tbl_data_entry *tbl_data;
14980                                 struct flow_grp_info grp_info = {
14981                                         .external = !!dev_flow.external,
14982                                         .transfer = !!transfer,
14983                                         .fdb_def_rule = !!priv->fdb_def_rule,
14984                                         .std_tbl_fix = 0,
14985                                         .skip_scale = dev_flow.skip_scale &
14986                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
14987                                 };
14988                                 struct mlx5_flow_meter_sub_policy *sub_policy =
14989                                 mtr_policy->sub_policys[domain][0];
14990
14991                                 if (i >= MLX5_MTR_RTE_COLORS)
14992                                         return -rte_mtr_error_set(error,
14993                                           ENOTSUP,
14994                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
14995                                           NULL,
14996                                           "cannot create policy "
14997                                           "jump action for this color");
14998                                 jump_group =
14999                                 ((const struct rte_flow_action_jump *)
15000                                                         act->conf)->group;
15001                                 if (mlx5_flow_group_to_table(dev, NULL,
15002                                                        jump_group,
15003                                                        &table,
15004                                                        &grp_info, &flow_err))
15005                                         return -rte_mtr_error_set(error,
15006                                         ENOTSUP,
15007                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15008                                         NULL, "cannot setup "
15009                                         "policy jump action");
15010                                 sub_policy->jump_tbl[i] =
15011                                 flow_dv_tbl_resource_get(dev,
15012                                         table, egress,
15013                                         transfer,
15014                                         !!dev_flow.external,
15015                                         NULL, jump_group, 0,
15016                                         0, &flow_err);
15017                                 if (!sub_policy->jump_tbl[i])
15018                                         return -rte_mtr_error_set(error,
15020                                         ENOTSUP,
15021                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15022                                         NULL, "cannot create jump action");
15023                                 tbl_data = container_of
15024                                 (sub_policy->jump_tbl[i],
15025                                 struct mlx5_flow_tbl_data_entry, tbl);
15026                                 act_cnt->dr_jump_action[domain] =
15027                                         tbl_data->jump.action;
15028                                 act_cnt->fate_action =
15029                                         MLX5_FLOW_FATE_JUMP;
15030                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15031                                 break;
15032                         }
15033                         default:
15034                                 return -rte_mtr_error_set(error, ENOTSUP,
15035                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15036                                           NULL, "action type not supported");
15037                         }
15038                 }
15039         }
15040         return 0;
15041 }
15042
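/*
 * Sketch of the per-color input consumed above: one END-terminated action
 * list per color, colors without actions left NULL (a minimal green-jump /
 * red-drop policy; the group number is illustrative):
 *
 *   struct rte_flow_action_jump jump = { .group = 1 };
 *   struct rte_flow_action green[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_action red[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   const struct rte_flow_action *acts[RTE_COLORS] = {
 *           [RTE_COLOR_GREEN] = green,
 *           [RTE_COLOR_RED] = red,
 *   };
 */
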
15043 /**
15044  * Create the policy actions for all domains, lock free
15045  * (the mutex should be acquired by the caller).
15046  * Calls the per-domain creator for each configured domain.
15047  *
15048  * @param[in] dev
15049  *   Pointer to the Ethernet device structure.
15050  * @param[in] mtr_policy
15051  *   Meter policy struct.
15052  * @param[in] actions
15053  *   Per-color action specifications used to create meter actions.
15054  * @param[out] error
15055  *   Perform verbose error reporting if not NULL. Initialized in case of
15056  *   error only.
15057  *
15058  * @return
15059  *   0 on success, otherwise negative errno value.
15060  */
15061 static int
15062 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15063                       struct mlx5_flow_meter_policy *mtr_policy,
15064                       const struct rte_flow_action *actions[RTE_COLORS],
15065                       struct rte_mtr_error *error)
15066 {
15067         int ret, i;
15068         uint16_t sub_policy_num;
15069
15070         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15071                 sub_policy_num = (mtr_policy->sub_policy_num >>
15072                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15073                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15074                 if (sub_policy_num) {
15075                         ret = __flow_dv_create_domain_policy_acts(dev,
15076                                 mtr_policy, actions,
15077                                 (enum mlx5_meter_domain)i, error);
15078                         if (ret)
15079                                 return ret;
15080                 }
15081         }
15082         return 0;
15083 }
15084
15085 /**
15086  * Query a DV flow rule for its statistics via DevX.
15087  *
15088  * @param[in] dev
15089  *   Pointer to Ethernet device.
15090  * @param[in] cnt_idx
15091  *   Index to the flow counter.
15092  * @param[out] data
15093  *   Data retrieved by the query.
15094  * @param[out] error
15095  *   Perform verbose error reporting if not NULL.
15096  *
15097  * @return
15098  *   0 on success, a negative errno value otherwise and rte_errno is set.
15099  */
15100 static int
15101 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15102                     struct rte_flow_error *error)
15103 {
15104         struct mlx5_priv *priv = dev->data->dev_private;
15105         struct rte_flow_query_count *qc = data;
15106
15107         if (!priv->config.devx)
15108                 return rte_flow_error_set(error, ENOTSUP,
15109                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15110                                           NULL,
15111                                           "counters are not supported");
15112         if (cnt_idx) {
15113                 uint64_t pkts, bytes;
15114                 struct mlx5_flow_counter *cnt;
15115                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15116
15117                 if (err)
15118                         return rte_flow_error_set(error, -err,
15119                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15120                                         NULL, "cannot read counters");
15121                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15122                 qc->hits_set = 1;
15123                 qc->bytes_set = 1;
15124                 qc->hits = pkts - cnt->hits;
15125                 qc->bytes = bytes - cnt->bytes;
15126                 if (qc->reset) {
15127                         cnt->hits = pkts;
15128                         cnt->bytes = bytes;
15129                 }
15130                 return 0;
15131         }
15132         return rte_flow_error_set(error, EINVAL,
15133                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15134                                   NULL,
15135                                   "counters are not available");
15136 }
15137
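/*
 * Application-side sketch for the counter query above, for a flow created
 * with a COUNT action: hits/bytes come back as deltas against the values
 * latched at the last reset, and setting qc.reset re-latches them:
 *
 *   struct rte_flow_query_count qc = { .reset = 1 };
 *   struct rte_flow_action count_action = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *   };
 *   if (rte_flow_query(port_id, flow, &count_action, &qc, &error) == 0)
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                  qc.hits, qc.bytes);
 */
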
15138 static int
15139 flow_dv_action_query(struct rte_eth_dev *dev,
15140                      const struct rte_flow_action_handle *handle, void *data,
15141                      struct rte_flow_error *error)
15142 {
15143         struct mlx5_age_param *age_param;
15144         struct rte_flow_query_age *resp;
15145         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15146         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15147         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15148         struct mlx5_priv *priv = dev->data->dev_private;
15149         struct mlx5_aso_ct_action *ct;
15150         uint16_t owner;
15151         uint32_t dev_idx;
15152
15153         switch (type) {
15154         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15155                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15156                 resp = data;
15157                 resp->aged = __atomic_load_n(&age_param->state,
15158                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15159                                                                           1 : 0;
15160                 resp->sec_since_last_hit_valid = !resp->aged;
15161                 if (resp->sec_since_last_hit_valid)
15162                         resp->sec_since_last_hit = __atomic_load_n
15163                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15164                 return 0;
15165         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15166                 return flow_dv_query_count(dev, idx, data, error);
15167         case MLX5_INDIRECT_ACTION_TYPE_CT:
15168                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15169                 if (owner != PORT_ID(priv))
15170                         return rte_flow_error_set(error, EACCES,
15171                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15172                                         NULL,
15173                                         "CT object owned by another port");
15174                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15175                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15176                 MLX5_ASSERT(ct);
15177                 if (!ct->refcnt)
15178                         return rte_flow_error_set(error, EFAULT,
15179                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15180                                         NULL,
15181                                         "CT object is inactive");
15182                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15183                                                         ct->peer;
15184                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15185                                                         ct->is_original;
15186                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15187                         return rte_flow_error_set(error, EIO,
15188                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15189                                         NULL,
15190                                         "Failed to query CT context");
15191                 return 0;
15192         default:
15193                 return rte_flow_error_set(error, ENOTSUP,
15194                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15195                                           "action type query not supported");
15196         }
15197 }
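
/*
 * Illustrative sketch, not part of the driver: querying an indirect AGE
 * action from the application side is dispatched to the function above by
 * the generic layer. The handle is assumed to come from
 * rte_flow_action_handle_create() with an RTE_FLOW_ACTION_TYPE_AGE
 * configuration; the helper name is hypothetical. Returns 1 if the flows
 * using the action aged out.
 */
static __rte_unused int
example_query_indirect_age(uint16_t port_id,
                           const struct rte_flow_action_handle *handle)
{
        struct rte_flow_query_age resp;
        struct rte_flow_error error;

        if (rte_flow_action_handle_query(port_id, handle, &resp, &error))
                return -rte_errno;
        return resp.aged;
}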
15198
15199 /**
15200  * Query a flow rule AGE action for aging information.
15201  *
15202  * @param[in] dev
15203  *   Pointer to Ethernet device.
15204  * @param[in] flow
15205  *   Pointer to the sub flow.
15206  * @param[out] data
15207  *   Data retrieved by the query.
15208  * @param[out] error
15209  *   Perform verbose error reporting if not NULL.
15210  *
15211  * @return
15212  *   0 on success, a negative errno value otherwise and rte_errno is set.
15213  */
15214 static int
15215 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15216                   void *data, struct rte_flow_error *error)
15217 {
15218         struct rte_flow_query_age *resp = data;
15219         struct mlx5_age_param *age_param;
15220
15221         if (flow->age) {
15222                 struct mlx5_aso_age_action *act =
15223                                      flow_aso_age_get_by_idx(dev, flow->age);
15224
15225                 age_param = &act->age_params;
15226         } else if (flow->counter) {
15227                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15228
15229                 if (!age_param || !age_param->timeout)
15230                         return rte_flow_error_set
15231                                         (error, EINVAL,
15232                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15233                                          NULL, "cannot read age data");
15234         } else {
15235                 return rte_flow_error_set(error, EINVAL,
15236                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15237                                           NULL, "age data not available");
15238         }
15239         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15240                                      AGE_TMOUT ? 1 : 0;
15241         resp->sec_since_last_hit_valid = !resp->aged;
15242         if (resp->sec_since_last_hit_valid)
15243                 resp->sec_since_last_hit = __atomic_load_n
15244                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15245         return 0;
15246 }
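
/*
 * Illustrative sketch, not part of the driver: interpreting the
 * rte_flow_query_age response filled above. sec_since_last_hit is only
 * meaningful while the flow has not aged out yet. Hypothetical helper.
 */
static __rte_unused void
example_report_age(const struct rte_flow_query_age *resp)
{
        if (resp->aged)
                DRV_LOG(INFO, "flow aged out");
        else if (resp->sec_since_last_hit_valid)
                DRV_LOG(INFO, "last hit %u seconds ago",
                        (unsigned int)resp->sec_since_last_hit);
}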
15247
15248 /**
15249  * Query a flow.
15250  *
15251  * @see rte_flow_query()
15252  * @see rte_flow_ops
15253  */
15254 static int
15255 flow_dv_query(struct rte_eth_dev *dev,
15256               struct rte_flow *flow,
15257               const struct rte_flow_action *actions,
15258               void *data,
15259               struct rte_flow_error *error)
15260 {
15261         int ret = -EINVAL;
15262
15263         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15264                 switch (actions->type) {
15265                 case RTE_FLOW_ACTION_TYPE_VOID:
15266                         break;
15267                 case RTE_FLOW_ACTION_TYPE_COUNT:
15268                         ret = flow_dv_query_count(dev, flow->counter, data,
15269                                                   error);
15270                         break;
15271                 case RTE_FLOW_ACTION_TYPE_AGE:
15272                         ret = flow_dv_query_age(dev, flow, data, error);
15273                         break;
15274                 default:
15275                         return rte_flow_error_set(error, ENOTSUP,
15276                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15277                                                   actions,
15278                                                   "action not supported");
15279                 }
15280         }
15281         return ret;
15282 }
15283
15284 /**
15285  * Destroy the meter table set.
15286  * Lock free, mutex should be acquired by the caller.
15287  *
15288  * @param[in] dev
15289  *   Pointer to Ethernet device.
15290  * @param[in] fm
15291  *   Meter information table.
15292  */
15293 static void
15294 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15295                         struct mlx5_flow_meter_info *fm)
15296 {
15297         struct mlx5_priv *priv = dev->data->dev_private;
15298         int i;
15299
15300         if (!fm || !priv->config.dv_flow_en)
15301                 return;
15302         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15303                 if (fm->drop_rule[i]) {
15304                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15305                         fm->drop_rule[i] = NULL;
15306                 }
15307         }
15308 }
15309
15310 static void
15311 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15312 {
15313         struct mlx5_priv *priv = dev->data->dev_private;
15314         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15315         struct mlx5_flow_tbl_data_entry *tbl;
15316         int i, j;
15317
15318         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15319                 if (mtrmng->def_rule[i]) {
15320                         claim_zero(mlx5_flow_os_destroy_flow
15321                                         (mtrmng->def_rule[i]));
15322                         mtrmng->def_rule[i] = NULL;
15323                 }
15324                 if (mtrmng->def_matcher[i]) {
15325                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15326                                 struct mlx5_flow_tbl_data_entry, tbl);
15327                         mlx5_cache_unregister(&tbl->matchers,
15328                                       &mtrmng->def_matcher[i]->entry);
15329                         mtrmng->def_matcher[i] = NULL;
15330                 }
15331                 for (j = 0; j < MLX5_REG_BITS; j++) {
15332                         if (mtrmng->drop_matcher[i][j]) {
15333                                 tbl =
15334                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15335                                              struct mlx5_flow_tbl_data_entry,
15336                                              tbl);
15337                                 mlx5_cache_unregister(&tbl->matchers,
15338                                         &mtrmng->drop_matcher[i][j]->entry);
15339                                 mtrmng->drop_matcher[i][j] = NULL;
15340                         }
15341                 }
15342                 if (mtrmng->drop_tbl[i]) {
15343                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15344                                 mtrmng->drop_tbl[i]);
15345                         mtrmng->drop_tbl[i] = NULL;
15346                 }
15347         }
15348 }
15349
15350 /* Number of meter flow actions: count and jump, or count and drop. */
15351 #define METER_ACTIONS 2
15352
15353 static void
15354 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15355                               enum mlx5_meter_domain domain)
15356 {
15357         struct mlx5_priv *priv = dev->data->dev_private;
15358         struct mlx5_flow_meter_def_policy *def_policy =
15359                         priv->sh->mtrmng->def_policy[domain];
15360
15361         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15362         mlx5_free(def_policy);
15363         priv->sh->mtrmng->def_policy[domain] = NULL;
15364 }
15365
15366 /**
15367  * Destroy the default policy table set.
15368  *
15369  * @param[in] dev
15370  *   Pointer to Ethernet device.
15371  */
15372 static void
15373 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15374 {
15375         struct mlx5_priv *priv = dev->data->dev_private;
15376         int i;
15377
15378         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15379                 if (priv->sh->mtrmng->def_policy[i])
15380                         __flow_dv_destroy_domain_def_policy(dev,
15381                                         (enum mlx5_meter_domain)i);
15382         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15383 }
15384
15385 static int
15386 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15387                         uint32_t color_reg_c_idx,
15388                         enum rte_color color, void *matcher_object,
15389                         int actions_n, void *actions,
15390                         bool match_src_port, void **rule,
15391                         const struct rte_flow_attr *attr)
15392 {
15393         int ret;
15394         struct mlx5_flow_dv_match_params value = {
15395                 .size = sizeof(value.buf) -
15396                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15397         };
15398         struct mlx5_flow_dv_match_params matcher = {
15399                 .size = sizeof(matcher.buf) -
15400                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15401         };
15402         struct mlx5_priv *priv = dev->data->dev_private;
15403
15404         if (match_src_port && (priv->representor || priv->master)) {
15405                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15406                                                    value.buf, NULL, attr)) {
15407                         DRV_LOG(ERR,
15408                         "Failed to create meter policy flow with port.");
15409                         return -1;
15410                 }
15411         }
15412         flow_dv_match_meta_reg(matcher.buf, value.buf,
15413                                 (enum modify_reg)color_reg_c_idx,
15414                                 rte_col_2_mlx5_col(color),
15415                                 UINT32_MAX);
15416         ret = mlx5_flow_os_create_flow(matcher_object,
15417                         (void *)&value, actions_n, actions, rule);
15418         if (ret) {
15419                 DRV_LOG(ERR, "Failed to create meter policy flow.");
15420                 return -1;
15421         }
15422         return 0;
15423 }
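
/*
 * Illustrative sketch, not part of the driver: the matcher/rule pair above
 * matches the meter color register. The matcher masks only the low
 * MLX5_MTR_COLOR_BITS of the register and each per-color rule supplies
 * rte_col_2_mlx5_col(color) as the expected value. Hypothetical helper.
 */
static __rte_unused void
example_color_match_params(enum rte_color color,
                           uint32_t *match_value, uint32_t *match_mask)
{
        *match_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
        *match_value = rte_col_2_mlx5_col(color);
}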
15424
15425 static int
15426 __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
15427                         uint32_t color_reg_c_idx,
15428                         uint16_t priority,
15429                         struct mlx5_flow_meter_sub_policy *sub_policy,
15430                         const struct rte_flow_attr *attr,
15431                         bool match_src_port,
15432                         struct rte_flow_error *error)
15433 {
15434         struct mlx5_cache_entry *entry;
15435         struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
15436         struct mlx5_flow_dv_matcher matcher = {
15437                 .mask = {
15438                         .size = sizeof(matcher.mask.buf) -
15439                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15440                 },
15441                 .tbl = tbl_rsc,
15442         };
15443         struct mlx5_flow_dv_match_params value = {
15444                 .size = sizeof(value.buf) -
15445                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15446         };
15447         struct mlx5_flow_cb_ctx ctx = {
15448                 .error = error,
15449                 .data = &matcher,
15450         };
15451         struct mlx5_flow_tbl_data_entry *tbl_data;
15452         struct mlx5_priv *priv = dev->data->dev_private;
15453         uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
15454
15455         if (match_src_port && (priv->representor || priv->master)) {
15456                 if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
15457                                                    value.buf, NULL, attr)) {
15458                         DRV_LOG(ERR,
15459                         "Failed to register meter drop matcher with port.");
15460                         return -1;
15461                 }
15462         }
15463         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
15464         if (priority < RTE_COLOR_RED)
15465                 flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15466                         (enum modify_reg)color_reg_c_idx, 0, color_mask);
15467         matcher.priority = priority;
15468         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
15469                                         matcher.mask.size);
15470         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15471         if (!entry) {
15472                 DRV_LOG(ERR, "Failed to register meter drop matcher.");
15473                 return -1;
15474         }
15475         sub_policy->color_matcher[priority] =
15476                 container_of(entry, struct mlx5_flow_dv_matcher, entry);
15477         return 0;
15478 }
15479
15480 /**
15481  * Create the policy rules per domain.
15482  *
15483  * @param[in] dev
15484  *   Pointer to Ethernet device.
15485  * @param[in] sub_policy
15486  *   Pointer to sub policy table.
15487  * @param[in] egress
15488  *   Direction of the table.
15489  * @param[in] transfer
15490  *   E-Switch or NIC flow.
15491  * @param[in] acts
15492  *   Pointer to policy action list per color.
15493  *
15494  * @return
15495  *   0 on success, -1 otherwise.
15496  */
15497 static int
15498 __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
15499                 struct mlx5_flow_meter_sub_policy *sub_policy,
15500                 uint8_t egress, uint8_t transfer, bool match_src_port,
15501                 struct mlx5_meter_policy_acts acts[RTE_COLORS])
15502 {
15503         struct rte_flow_error flow_err;
15504         uint32_t color_reg_c_idx;
15505         struct rte_flow_attr attr = {
15506                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
15507                 .priority = 0,
15508                 .ingress = 0,
15509                 .egress = !!egress,
15510                 .transfer = !!transfer,
15511                 .reserved = 0,
15512         };
15513         int i;
15514         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
15515
15516         if (ret < 0)
15517                 return -1;
15518         /* Create policy table with POLICY level. */
15519         if (!sub_policy->tbl_rsc)
15520                 sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
15521                                 MLX5_FLOW_TABLE_LEVEL_POLICY,
15522                                 egress, transfer, false, NULL, 0, 0,
15523                                 sub_policy->idx, &flow_err);
15524         if (!sub_policy->tbl_rsc) {
15525                 DRV_LOG(ERR,
15526                         "Failed to create meter sub policy table.");
15527                 return -1;
15528         }
15529         /* Prepare matchers. */
15530         color_reg_c_idx = ret;
15531         for (i = 0; i < RTE_COLORS; i++) {
15532                 if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
15533                         continue;
15534                 attr.priority = i;
15535                 if (!sub_policy->color_matcher[i]) {
15536                         /* Create matchers for Color. */
15537                         if (__flow_dv_create_policy_matcher(dev,
15538                                 color_reg_c_idx, i, sub_policy,
15539                                 &attr, match_src_port, &flow_err))
15540                                 return -1;
15541                 }
15542                 /* Create flow, matching color. */
15543                 if (acts[i].actions_n)
15544                         if (__flow_dv_create_policy_flow(dev,
15545                                 color_reg_c_idx, (enum rte_color)i,
15546                                 sub_policy->color_matcher[i]->matcher_object,
15547                                 acts[i].actions_n,
15548                                 acts[i].dv_actions,
15549                                 match_src_port,
15550                                 &sub_policy->color_rule[i],
15551                                 &attr))
15552                                 return -1;
15553         }
15554         return 0;
15555 }
15556
15557 static int
15558 __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
15559                         struct mlx5_flow_meter_policy *mtr_policy,
15560                         struct mlx5_flow_meter_sub_policy *sub_policy,
15561                         uint32_t domain)
15562 {
15563         struct mlx5_priv *priv = dev->data->dev_private;
15564         struct mlx5_meter_policy_acts acts[RTE_COLORS];
15565         struct mlx5_flow_dv_tag_resource *tag;
15566         struct mlx5_flow_dv_port_id_action_resource *port_action;
15567         struct mlx5_hrxq *hrxq;
15568         uint8_t egress, transfer;
15569         bool match_src_port = false;
15570         int i;
15571
15572         for (i = 0; i < RTE_COLORS; i++) {
15573                 acts[i].actions_n = 0;
15574                 if (i == RTE_COLOR_YELLOW)
15575                         continue;
15576                 if (i == RTE_COLOR_RED) {
15577                         /* Only support drop on red. */
15578                         acts[i].dv_actions[0] =
15579                         mtr_policy->dr_drop_action[domain];
15580                         acts[i].actions_n = 1;
15581                         continue;
15582                 }
15583                 if (mtr_policy->act_cnt[i].rix_mark) {
15584                         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
15585                                         mtr_policy->act_cnt[i].rix_mark);
15586                         if (!tag) {
15587                                 DRV_LOG(ERR, "Failed to find "
15588                                 "mark action for policy.");
15589                                 return -1;
15590                         }
15591                         acts[i].dv_actions[acts[i].actions_n] =
15592                                                 tag->action;
15593                         acts[i].actions_n++;
15594                 }
15595                 if (mtr_policy->act_cnt[i].modify_hdr) {
15596                         acts[i].dv_actions[acts[i].actions_n] =
15597                         mtr_policy->act_cnt[i].modify_hdr->action;
15598                         acts[i].actions_n++;
15599                 }
15600                 if (mtr_policy->act_cnt[i].fate_action) {
15601                         switch (mtr_policy->act_cnt[i].fate_action) {
15602                         case MLX5_FLOW_FATE_PORT_ID:
15603                                 port_action = mlx5_ipool_get
15604                                         (priv->sh->ipool[MLX5_IPOOL_PORT_ID],
15605                                 mtr_policy->act_cnt[i].rix_port_id_action);
15606                                 if (!port_action) {
15607                                         DRV_LOG(ERR, "Failed to find "
15608                                                 "port action for policy.");
15609                                         return -1;
15610                                 }
15611                                 acts[i].dv_actions[acts[i].actions_n] =
15612                                 port_action->action;
15613                                 acts[i].actions_n++;
15614                                 mtr_policy->dev = dev;
15615                                 match_src_port = true;
15616                                 break;
15617                         case MLX5_FLOW_FATE_DROP:
15618                         case MLX5_FLOW_FATE_JUMP:
15619                                 acts[i].dv_actions[acts[i].actions_n] =
15620                                 mtr_policy->act_cnt[i].dr_jump_action[domain];
15621                                 acts[i].actions_n++;
15622                                 break;
15623                         case MLX5_FLOW_FATE_SHARED_RSS:
15624                         case MLX5_FLOW_FATE_QUEUE:
15625                                 hrxq = mlx5_ipool_get
15626                                 (priv->sh->ipool[MLX5_IPOOL_HRXQ],
15627                                 sub_policy->rix_hrxq[i]);
15628                                 if (!hrxq) {
15629                                         DRV_LOG(ERR, "Failed to find "
15630                                                 "queue action for policy.");
15631                                         return -1;
15632                                 }
15633                                 acts[i].dv_actions[acts[i].actions_n] =
15634                                 hrxq->action;
15635                                 acts[i].actions_n++;
15636                                 break;
15637                         default:
15638                                 /* Other fate actions: nothing to do. */
15639                                 break;
15640                         }
15641                 }
15642         }
15643         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15644         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15645         if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
15646                                 egress, transfer, match_src_port, acts)) {
15647                 DRV_LOG(ERR,
15648                 "Failed to create policy rules per domain.");
15649                 return -1;
15650         }
15651         return 0;
15652 }
15653
15654 /**
15655  * Create the policy rules.
15656  *
15657  * @param[in] dev
15658  *   Pointer to Ethernet device.
15659  * @param[in,out] mtr_policy
15660  *   Pointer to meter policy table.
15661  *
15662  * @return
15663  *   0 on success, -1 otherwise.
15664  */
15665 static int
15666 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
15667                              struct mlx5_flow_meter_policy *mtr_policy)
15668 {
15669         int i;
15670         uint16_t sub_policy_num;
15671
15672         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15673                 sub_policy_num = (mtr_policy->sub_policy_num >>
15674                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15675                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15676                 if (!sub_policy_num)
15677                         continue;
15678                 /* Prepare actions list and create policy rules. */
15679                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
15680                         mtr_policy->sub_policys[i][0], i)) {
15681                         DRV_LOG(ERR,
15682                         "Failed to create policy action list per domain.");
15683                         return -1;
15684                 }
15685         }
15686         return 0;
15687 }
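
/*
 * Illustrative sketch, not part of the driver: mtr_policy->sub_policy_num
 * packs one sub-policy counter per meter domain,
 * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits wide each. These hypothetical helpers
 * spell out the encoding used by the loop above and by the RSS sub-policy
 * code below.
 */
static __rte_unused uint16_t
example_get_sub_policy_num(uint32_t packed, int domain)
{
        return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
               MLX5_MTR_SUB_POLICY_NUM_MASK;
}

static __rte_unused uint32_t
example_set_sub_policy_num(uint32_t packed, int domain, uint16_t num)
{
        packed &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
                    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
        packed |= ((uint32_t)num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
                  (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
        return packed;
}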
15688
15689 static int
15690 __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
15691 {
15692         struct mlx5_priv *priv = dev->data->dev_private;
15693         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15694         struct mlx5_flow_meter_def_policy *def_policy;
15695         struct mlx5_flow_tbl_resource *jump_tbl;
15696         struct mlx5_flow_tbl_data_entry *tbl_data;
15697         uint8_t egress, transfer;
15698         struct rte_flow_error error;
15699         struct mlx5_meter_policy_acts acts[RTE_COLORS];
15700         int ret;
15701
15702         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15703         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15704         def_policy = mtrmng->def_policy[domain];
15705         if (!def_policy) {
15706                 def_policy = mlx5_malloc(MLX5_MEM_ZERO,
15707                         sizeof(struct mlx5_flow_meter_def_policy),
15708                         RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
15709                 if (!def_policy) {
15710                         DRV_LOG(ERR, "Failed to alloc "
15711                                         "default policy table.");
15712                         goto def_policy_error;
15713                 }
15714                 mtrmng->def_policy[domain] = def_policy;
15715                 /* Create the meter suffix table with SUFFIX level. */
15716                 jump_tbl = flow_dv_tbl_resource_get(dev,
15717                                 MLX5_FLOW_TABLE_LEVEL_METER,
15718                                 egress, transfer, false, NULL, 0,
15719                                 0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
15720                 if (!jump_tbl) {
15721                         DRV_LOG(ERR,
15722                                 "Failed to create meter suffix table.");
15723                         goto def_policy_error;
15724                 }
15725                 def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
15726                 tbl_data = container_of(jump_tbl,
15727                                 struct mlx5_flow_tbl_data_entry, tbl);
15728                 def_policy->dr_jump_action[RTE_COLOR_GREEN] =
15729                                                 tbl_data->jump.action;
15730                 acts[RTE_COLOR_GREEN].dv_actions[0] =
15731                                                 tbl_data->jump.action;
15732                 acts[RTE_COLOR_GREEN].actions_n = 1;
15733                 /* Create jump action to the drop table. */
15734                 if (!mtrmng->drop_tbl[domain]) {
15735                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
15736                                 (dev, MLX5_FLOW_TABLE_LEVEL_METER,
15737                                 egress, transfer, false, NULL, 0,
15738                                 0, MLX5_MTR_TABLE_ID_DROP, &error);
15739                         if (!mtrmng->drop_tbl[domain]) {
15740                                 DRV_LOG(ERR, "Failed to create "
15741                                 "meter drop table for default policy.");
15742                                 goto def_policy_error;
15743                         }
15744                 }
15745                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15746                                 struct mlx5_flow_tbl_data_entry, tbl);
15747                 def_policy->dr_jump_action[RTE_COLOR_RED] =
15748                                                 tbl_data->jump.action;
15749                 acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
15750                 acts[RTE_COLOR_RED].actions_n = 1;
15751                 /* Create default policy rules. */
15752                 ret = __flow_dv_create_domain_policy_rules(dev,
15753                                         &def_policy->sub_policy,
15754                                         egress, transfer, false, acts);
15755                 if (ret) {
15756                         DRV_LOG(ERR, "Failed to create "
15757                                 "default policy rules.");
15758                         goto def_policy_error;
15759                 }
15760         }
15761         return 0;
15762 def_policy_error:
15763         __flow_dv_destroy_domain_def_policy(dev,
15764                         (enum mlx5_meter_domain)domain);
15765         return -1;
15766 }
15767
15768 /**
15769  * Create the default policy table set.
15770  *
15771  * @param[in] dev
15772  *   Pointer to Ethernet device.
15773  * @return
15774  *   0 on success, -1 otherwise.
15775  */
15776 static int
15777 flow_dv_create_def_policy(struct rte_eth_dev *dev)
15778 {
15779         struct mlx5_priv *priv = dev->data->dev_private;
15780         int i;
15781
15782         /* Non-termination policy table. */
15783         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15784                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
15785                         continue;
15786                 if (__flow_dv_create_domain_def_policy(dev, i)) {
15787                         DRV_LOG(ERR,
15788                         "Failed to create default policy");
15789                         return -1;
15790                 }
15791         }
15792         return 0;
15793 }
15794
15795 /**
15796  * Create the needed meter tables.
15797  * Lock free, mutex should be acquired by the caller.
15798  *
15799  * @param[in] dev
15800  *   Pointer to Ethernet device.
15801  * @param[in] fm
15802  *   Meter information table.
15803  * @param[in] mtr_idx
15804  *   Meter index.
15805  * @param[in] domain_bitmap
15806  *   Domain bitmap.
15807  * @return
15808  *   0 on success, -1 otherwise.
15809  */
15810 static int
15811 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
15812                         struct mlx5_flow_meter_info *fm,
15813                         uint32_t mtr_idx,
15814                         uint8_t domain_bitmap)
15815 {
15816         struct mlx5_priv *priv = dev->data->dev_private;
15817         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15818         struct rte_flow_error error;
15819         struct mlx5_flow_tbl_data_entry *tbl_data;
15820         uint8_t egress, transfer;
15821         void *actions[METER_ACTIONS];
15822         int domain, ret, i;
15823         struct mlx5_flow_counter *cnt;
15824         struct mlx5_flow_dv_match_params value = {
15825                 .size = sizeof(value.buf) -
15826                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15827         };
15828         struct mlx5_flow_dv_match_params matcher_para = {
15829                 .size = sizeof(matcher_para.buf) -
15830                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15831         };
15832         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
15833                                                      0, &error);
15834         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
15835         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
15836         struct mlx5_cache_entry *entry;
15837         struct mlx5_flow_dv_matcher matcher = {
15838                 .mask = {
15839                         .size = sizeof(matcher.mask.buf) -
15840                         MLX5_ST_SZ_BYTES(fte_match_set_misc4),
15841                 },
15842         };
15843         struct mlx5_flow_dv_matcher *drop_matcher;
15844         struct mlx5_flow_cb_ctx ctx = {
15845                 .error = &error,
15846                 .data = &matcher,
15847         };
15848
15849         if (!priv->mtr_en || mtr_id_reg_c < 0) {
15850                 rte_errno = ENOTSUP;
15851                 return -1;
15852         }
15853         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
15854                 if (!(domain_bitmap & (1 << domain)) ||
15855                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
15856                         continue;
15857                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15858                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15859                 /* Create the drop table with METER DROP level. */
15860                 if (!mtrmng->drop_tbl[domain]) {
15861                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
15862                                         MLX5_FLOW_TABLE_LEVEL_METER,
15863                                         egress, transfer, false, NULL, 0,
15864                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
15865                         if (!mtrmng->drop_tbl[domain]) {
15866                                 DRV_LOG(ERR, "Failed to create meter drop table.");
15867                                 goto policy_error;
15868                         }
15869                 }
15870                 /* Create default matcher in drop table. */
15871                 matcher.tbl = mtrmng->drop_tbl[domain];
15872                 tbl_data = container_of(mtrmng->drop_tbl[domain],
15873                                 struct mlx5_flow_tbl_data_entry, tbl);
15874                 if (!mtrmng->def_matcher[domain]) {
15875                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15876                                        (enum modify_reg)mtr_id_reg_c,
15877                                        0, 0);
15878                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
15879                         matcher.crc = rte_raw_cksum
15880                                         ((const void *)matcher.mask.buf,
15881                                         matcher.mask.size);
15882                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15883                         if (!entry) {
15884                                 DRV_LOG(ERR, "Failed to register meter "
15885                                 "drop default matcher.");
15886                                 goto policy_error;
15887                         }
15888                         mtrmng->def_matcher[domain] = container_of(entry,
15889                         struct mlx5_flow_dv_matcher, entry);
15890                 }
15891                 /* Create default rule in drop table. */
15892                 if (!mtrmng->def_rule[domain]) {
15893                         i = 0;
15894                         actions[i++] = priv->sh->dr_drop_action;
15895                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15896                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
15897                         ret = mlx5_flow_os_create_flow
15898                                 (mtrmng->def_matcher[domain]->matcher_object,
15899                                 (void *)&value, i, actions,
15900                                 &mtrmng->def_rule[domain]);
15901                         if (ret) {
15902                                 DRV_LOG(ERR, "Failed to create meter "
15903                                 "default drop rule for drop table.");
15904                                 goto policy_error;
15905                         }
15906                 }
15907                 if (!fm->drop_cnt)
15908                         continue;
15909                 MLX5_ASSERT(mtrmng->max_mtr_bits);
15910                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
15911                         /* Create matchers for Drop. */
15912                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
15913                                         (enum modify_reg)mtr_id_reg_c, 0,
15914                                         (mtr_id_mask << mtr_id_offset));
15915                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
15916                         matcher.crc = rte_raw_cksum
15917                                         ((const void *)matcher.mask.buf,
15918                                         matcher.mask.size);
15919                         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
15920                         if (!entry) {
15921                                 DRV_LOG(ERR,
15922                                 "Failed to register meter drop matcher.");
15923                                 goto policy_error;
15924                         }
15925                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
15926                                 container_of(entry, struct mlx5_flow_dv_matcher,
15927                                              entry);
15928                 }
15929                 drop_matcher =
15930                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
15931                 /* Create drop rule, matching meter_id only. */
15932                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
15933                                 (enum modify_reg)mtr_id_reg_c,
15934                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
15935                 i = 0;
15936                 cnt = flow_dv_counter_get_by_idx(dev,
15937                                         fm->drop_cnt, NULL);
15938                 actions[i++] = cnt->action;
15939                 actions[i++] = priv->sh->dr_drop_action;
15940                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
15941                                                (void *)&value, i, actions,
15942                                                &fm->drop_rule[domain]);
15943                 if (ret) {
15944                         DRV_LOG(ERR, "Failed to create meter "
15945                                 "drop rule for drop table.");
15946                         goto policy_error;
15947                 }
15948         }
15949         return 0;
15950 policy_error:
15951         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15952                 if (fm->drop_rule[i]) {
15953                         claim_zero(mlx5_flow_os_destroy_flow
15954                                 (fm->drop_rule[i]));
15955                         fm->drop_rule[i] = NULL;
15956                 }
15957         }
15958         return -1;
15959 }
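
/*
 * Illustrative sketch, not part of the driver: the drop rules above match
 * the meter ID register. When the register is shared with the color
 * (priv->mtr_reg_share), the low MLX5_MTR_COLOR_BITS hold the color, so
 * the meter index sits above them and the matcher mask covers max_mtr_bits
 * starting at that offset. Hypothetical helper.
 */
static __rte_unused void
example_mtr_id_match_params(uint32_t mtr_idx, uint8_t max_mtr_bits,
                            bool reg_share, uint32_t *value, uint32_t *mask)
{
        uint8_t offset = reg_share ? MLX5_MTR_COLOR_BITS : 0;

        *mask = ((UINT32_C(1) << max_mtr_bits) - 1) << offset;
        *value = mtr_idx << offset;
}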
15960
15961 /**
15962  * Find the policy table for prefix table with RSS.
15963  *
15964  * @param[in] dev
15965  *   Pointer to Ethernet device.
15966  * @param[in] mtr_policy
15967  *   Pointer to meter policy table.
15968  * @param[in] rss_desc
15969  *   Array of RSS descriptor pointers, one per meter color.
15970  * @return
15971  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
15972  */
15973 static struct mlx5_flow_meter_sub_policy *
15974 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
15975                 struct mlx5_flow_meter_policy *mtr_policy,
15976                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
15977 {
15978         struct mlx5_priv *priv = dev->data->dev_private;
15979         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
15980         uint32_t sub_policy_idx = 0;
15981         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
15982         uint32_t i, j;
15983         struct mlx5_hrxq *hrxq;
15984         struct mlx5_flow_handle dh;
15985         struct mlx5_meter_policy_action_container *act_cnt;
15986         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
15987         uint16_t sub_policy_num;
15988
15989         rte_spinlock_lock(&mtr_policy->sl);
15990         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15991                 if (!rss_desc[i])
15992                         continue;
15993                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
15994                 if (!hrxq_idx[i]) {
15995                         rte_spinlock_unlock(&mtr_policy->sl);
15996                         return NULL;
15997                 }
15998         }
15999         sub_policy_num = (mtr_policy->sub_policy_num >>
16000                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16001                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16002         for (i = 0; i < sub_policy_num; i++) {
16004                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
16005                         if (rss_desc[j] &&
16006                                 hrxq_idx[j] !=
16007                         mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
16008                                 break;
16009                 }
16010                 if (j >= MLX5_MTR_RTE_COLORS) {
16011                         /*
16012                          * Found the sub policy table with
16013                          * the same queue per color.
16014                          */
16015                         rte_spinlock_unlock(&mtr_policy->sl);
16016                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
16017                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
16018                         return mtr_policy->sub_policys[domain][i];
16019                 }
16020         }
16021         /* Create sub policy. */
16022         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16023                 /* Reuse the first dummy sub_policy. */
16024                 sub_policy = mtr_policy->sub_policys[domain][0];
16025                 sub_policy_idx = sub_policy->idx;
16026         } else {
16027                 sub_policy = mlx5_ipool_zmalloc
16028                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16029                                 &sub_policy_idx);
16030                 if (!sub_policy ||
16031                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16032                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16033                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16034                         goto rss_sub_policy_error;
16035                 }
16036                 sub_policy->idx = sub_policy_idx;
16037                 sub_policy->main_policy = mtr_policy;
16038         }
16039         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16040                 if (!rss_desc[i])
16041                         continue;
16042                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16043                 /*
16044                  * Overwrite the last action, replacing the
16045                  * RSS action with a Queue action.
16046                  */
16047                 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16048                               hrxq_idx[i]);
16049                 if (!hrxq) {
16050                         DRV_LOG(ERR, "Failed to create policy hrxq");
16051                         goto rss_sub_policy_error;
16052                 }
16053                 act_cnt = &mtr_policy->act_cnt[i];
16054                 if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16055                         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16056                         if (act_cnt->rix_mark)
16057                                 dh.mark = 1;
16058                         dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16059                         dh.rix_hrxq = hrxq_idx[i];
16060                         flow_drv_rxq_flags_set(dev, &dh);
16061                 }
16062         }
16063         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16064                 sub_policy, domain)) {
16065                 DRV_LOG(ERR, "Failed to create policy "
16066                         "rules per domain.");
16067                 goto rss_sub_policy_error;
16068         }
16069         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16070                 i = (mtr_policy->sub_policy_num >>
16071                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16072                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16073                 mtr_policy->sub_policys[domain][i] = sub_policy;
16074                 i++;
16075                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
16076                         goto rss_sub_policy_error;
16077                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16078                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16079                 mtr_policy->sub_policy_num |=
16080                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16081                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16082         }
16083         rte_spinlock_unlock(&mtr_policy->sl);
16084         return sub_policy;
16085 rss_sub_policy_error:
16086         if (sub_policy) {
16087                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16088                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16089                         i = (mtr_policy->sub_policy_num >>
16090                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16091                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16092                         mtr_policy->sub_policys[domain][i] = NULL;
16093                         mlx5_ipool_free
16094                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16095                                         sub_policy->idx);
16096                 }
16097         }
16098         if (sub_policy_idx)
16099                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16100                         sub_policy_idx);
16101         rte_spinlock_unlock(&mtr_policy->sl);
16102         return NULL;
16103 }
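
/*
 * Illustrative sketch, not part of the driver: the reuse test above in
 * compact form - an existing sub-policy can be shared iff every requested
 * color resolves to the hrxq already attached to that color. Hypothetical
 * helper operating on the same fields as the loop above.
 */
static __rte_unused bool
example_sub_policy_matches(const struct mlx5_flow_meter_sub_policy *sp,
                           const uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS],
                           struct mlx5_flow_rss_desc *const rss[MLX5_MTR_RTE_COLORS])
{
        uint32_t j;

        for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
                if (rss[j] && hrxq_idx[j] != sp->rix_hrxq[j])
                        return false;
        return true;
}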
16104
16105
16106 /**
16107  * Destroy the sub policy table with RX queue.
16108  *
16109  * @param[in] dev
16110  *   Pointer to Ethernet device.
16111  * @param[in] mtr_policy
16112  *   Pointer to meter policy table.
16113  */
16114 static void
16115 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16116                 struct mlx5_flow_meter_policy *mtr_policy)
16117 {
16118         struct mlx5_priv *priv = dev->data->dev_private;
16119         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16120         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16121         uint32_t i, j;
16122         uint16_t sub_policy_num, new_policy_num;
16123
16124         rte_spinlock_lock(&mtr_policy->sl);
16125         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16126                 switch (mtr_policy->act_cnt[i].fate_action) {
16127                 case MLX5_FLOW_FATE_SHARED_RSS:
16128                         sub_policy_num = (mtr_policy->sub_policy_num >>
16129                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16130                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16131                         new_policy_num = sub_policy_num;
16132                         for (j = 0; j < sub_policy_num; j++) {
16133                                 sub_policy =
16134                                         mtr_policy->sub_policys[domain][j];
16135                                 if (sub_policy) {
16136                                         __flow_dv_destroy_sub_policy_rules(dev,
16137                                                 sub_policy);
16138                                         if (sub_policy !=
16139                                             mtr_policy->sub_policys[domain][0]) {
16140                                                 mtr_policy->sub_policys[domain][j] =
16141                                                         NULL;
16142                                                 mlx5_ipool_free(priv->sh->
16143                                                         ipool[MLX5_IPOOL_MTR_POLICY],
16144                                                         sub_policy->idx);
16145                                                 new_policy_num--;
16146                                         }
16147                                 }
16148                         }
16149                         if (new_policy_num != sub_policy_num) {
16150                                 mtr_policy->sub_policy_num &=
16151                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16152                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16153                                 mtr_policy->sub_policy_num |=
16154                                 (new_policy_num &
16155                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16156                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16157                         }
16158                         break;
16159                 case MLX5_FLOW_FATE_QUEUE:
16160                         sub_policy = mtr_policy->sub_policys[domain][0];
16161                         __flow_dv_destroy_sub_policy_rules(dev,
16162                                                 sub_policy);
16163                         break;
16164                 default:
16165                         /* Other actions have no queue, nothing to do. */
16166                         break;
16167                 }
16168         }
16169         rte_spinlock_unlock(&mtr_policy->sl);
16170 }
16171
16172 /**
16173  * Validate the batch counter support in root table.
16174  *
16175  * Create a simple flow with invalid counter and drop action on root table to
16176  * validate if batch counter with offset on root table is supported or not.
16177  *
16178  * @param[in] dev
16179  *   Pointer to rte_eth_dev structure.
16180  *
16181  * @return
16182  *   0 on success, a negative errno value otherwise and rte_errno is set.
16183  */
16184 int
16185 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16186 {
16187         struct mlx5_priv *priv = dev->data->dev_private;
16188         struct mlx5_dev_ctx_shared *sh = priv->sh;
16189         struct mlx5_flow_dv_match_params mask = {
16190                 .size = sizeof(mask.buf),
16191         };
16192         struct mlx5_flow_dv_match_params value = {
16193                 .size = sizeof(value.buf),
16194         };
16195         struct mlx5dv_flow_matcher_attr dv_attr = {
16196                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16197                 .priority = 0,
16198                 .match_criteria_enable = 0,
16199                 .match_mask = (void *)&mask,
16200         };
16201         void *actions[2] = { 0 };
16202         struct mlx5_flow_tbl_resource *tbl = NULL;
16203         struct mlx5_devx_obj *dcs = NULL;
16204         void *matcher = NULL;
16205         void *flow = NULL;
16206         int ret = -1;
16207
16208         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16209                                         0, 0, 0, NULL);
16210         if (!tbl)
16211                 goto err;
16212         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16213         if (!dcs)
16214                 goto err;
16215         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16216                                                     &actions[0]);
16217         if (ret)
16218                 goto err;
16219         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
16220         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
16221                                                &matcher);
16222         if (ret)
16223                 goto err;
16224         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
16225                                        actions, &flow);
16226 err:
16227         /*
16228          * validate the invalid offset value, so flow creation should succeed.
16229          * validate the invalid offset value, flow create should success.
16230          * In this case, it means batch counter is not supported in root table.
16231          * Otherwise, if flow creation fails, counter offset is supported.
16232          * Otherwise, if flow create is failed, counter offset is supported.
16233          */
16234         if (flow) {
16235                 DRV_LOG(INFO, "Batch counter is not supported in root "
16236                               "table. Switch to fallback mode.");
16237                 rte_errno = ENOTSUP;
16238                 ret = -rte_errno;
16239                 claim_zero(mlx5_flow_os_destroy_flow(flow));
16240         } else {
16241                 /* Check matcher to make sure validation failed at flow creation. */
16242                 if (!matcher || errno != EINVAL)
16243                         DRV_LOG(ERR, "Unexpected error in counter offset "
16244                                      "support detection");
16245                 ret = 0;
16246         }
16247         if (actions[0])
16248                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
16249         if (matcher)
16250                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
16251         if (tbl)
16252                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
16253         if (dcs)
16254                 claim_zero(mlx5_devx_cmd_destroy(dcs));
16255         return ret;
16256 }
16257
16258 /**
16259  * Query a devx counter.
16260  *
16261  * @param[in] dev
16262  *   Pointer to the Ethernet device structure.
16263  * @param[in] cnt
16264  *   Index to the flow counter.
16265  * @param[in] clear
16266  *   Set to clear the counter statistics.
16267  * @param[out] pkts
16268  *   The statistics value of packets.
16269  * @param[out] bytes
16270  *   The statistics value of bytes.
16271  *
16272  * @return
16273  *   0 on success, otherwise return -1.
16274  */
16275 static int
16276 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
16277                       uint64_t *pkts, uint64_t *bytes)
16278 {
16279         struct mlx5_priv *priv = dev->data->dev_private;
16280         struct mlx5_flow_counter *cnt;
16281         uint64_t inn_pkts, inn_bytes;
16282         int ret;
16283
16284         if (!priv->config.devx)
16285                 return -1;
16286
16287         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
16288         if (ret)
16289                 return -1;
16290         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
16291         *pkts = inn_pkts - cnt->hits;
16292         *bytes = inn_bytes - cnt->bytes;
16293         if (clear) {
16294                 cnt->hits = inn_pkts;
16295                 cnt->bytes = inn_bytes;
16296         }
16297         return 0;
16298 }
16299
16300 /**
16301  * Get aged-out flows.
16302  *
16303  * @param[in] dev
16304  *   Pointer to the Ethernet device structure.
16305  * @param[in] context
16306  *   The address of an array of pointers to the aged-out flows contexts.
16307  * @param[in] nb_contexts
16308  *   The length of context array pointers.
16309  * @param[out] error
16310  *   Perform verbose error reporting if not NULL. Initialized in case of
16311  *   error only.
16312  *
16313  * @return
16314  *   The number of aged-out flows reported on success, otherwise a negative
16315  *   errno value. If nb_contexts is 0, the total number of aged-out contexts
16316  *   is returned; otherwise the number of aged-out flows reported in the
16317  *   context array is returned.
16319  */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
                    void **context,
                    uint32_t nb_contexts,
                    struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_age_info *age_info;
        struct mlx5_age_param *age_param;
        struct mlx5_flow_counter *counter;
        struct mlx5_aso_age_action *act;
        int nb_flows = 0;

        if (nb_contexts && !context)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "empty context");
        age_info = GET_PORT_AGE_INFO(priv);
        rte_spinlock_lock(&age_info->aged_sl);
        LIST_FOREACH(act, &age_info->aged_aso, next) {
                nb_flows++;
                if (nb_contexts) {
                        context[nb_flows - 1] = act->age_params.context;
                        if (!(--nb_contexts))
                                break;
                }
        }
        TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
                nb_flows++;
                if (nb_contexts) {
                        age_param = MLX5_CNT_TO_AGE(counter);
                        context[nb_flows - 1] = age_param->context;
                        if (!(--nb_contexts))
                                break;
                }
        }
        rte_spinlock_unlock(&age_info->aged_sl);
        MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
        return nb_flows;
}
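
/*
 * Usage sketch (documentation only): applications reach the handler above
 * through rte_flow_get_aged_flows(). A common pattern is to size the
 * context array with a first call using nb_contexts == 0, then collect
 * the aged-out contexts with a second call. The MLX5_FLOW_DV_DOC_EXAMPLES
 * guard is hypothetical and never defined.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static int
example_collect_aged(uint16_t port_id)
{
        struct rte_flow_error error;
        void **contexts;
        int total, reported;

        /* First call: nb_contexts == 0 returns the total aged count. */
        total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
        if (total <= 0)
                return total;
        contexts = mlx5_malloc(MLX5_MEM_ZERO,
                               sizeof(void *) * (size_t)total, 0,
                               SOCKET_ID_ANY);
        if (contexts == NULL)
                return -ENOMEM;
        /* Second call: fill up to 'total' aged-out flow contexts. */
        reported = rte_flow_get_aged_flows(port_id, contexts, total, &error);
        mlx5_free(contexts);
        return reported;
}
#endif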

/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
        return flow_dv_counter_alloc(dev, 0);
}

/**
 * Validate indirect action.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Indirect action configuration.
 * @param[in] action
 *   The indirect action object to validate.
 * @param[out] err
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
flow_dv_action_validate(struct rte_eth_dev *dev,
                        const struct rte_flow_indir_action_conf *conf,
                        const struct rte_flow_action *action,
                        struct rte_flow_error *err)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        RTE_SET_USED(conf);
        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_RSS:
                /*
                 * priv->obj_ops is set according to driver capabilities.
                 * When DevX capabilities are sufficient, it is set to
                 * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
                 * ibv_obj_ops doesn't support the ind_table_modify operation,
                 * so in that case the indirect RSS action can't be used.
                 */
                if (priv->obj_ops.ind_table_modify == NULL)
                        return rte_flow_error_set
                                        (err, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ACTION,
                                         NULL,
                                         "Indirect RSS action not supported");
                return mlx5_validate_action_rss(dev, action, err);
        case RTE_FLOW_ACTION_TYPE_AGE:
                if (!priv->sh->aso_age_mng)
                        return rte_flow_error_set(err, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                                NULL,
                                                "Indirect age action not supported");
                return flow_dv_validate_action_age(0, action, dev, err);
        case RTE_FLOW_ACTION_TYPE_COUNT:
                /*
                 * There are two mechanisms to share the action count.
                 * The old mechanism uses the shared field to share, while the
                 * new mechanism uses the indirect action API.
                 * This validation makes sure that the two mechanisms
                 * are not combined.
                 */
                if (is_shared_action_count(action))
                        return rte_flow_error_set(err, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "Mixing shared and indirect counters is not supported");
                return flow_dv_validate_action_count(dev, true, 0, err);
        case RTE_FLOW_ACTION_TYPE_CONNTRACK:
                if (!priv->sh->ct_aso_en)
                        return rte_flow_error_set(err, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                        "ASO CT is not supported");
                return mlx5_validate_action_ct(dev, action->conf, err);
        default:
                return rte_flow_error_set(err, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL,
                                          "action type not supported");
        }
}
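
/*
 * Usage sketch (documentation only): the validation above runs when an
 * application creates an indirect action, e.g. an indirect RSS action via
 * rte_flow_action_handle_create(). The queue numbers below are purely
 * illustrative, and the MLX5_FLOW_DV_DOC_EXAMPLES guard is hypothetical
 * and never defined.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static struct rte_flow_action_handle *
example_indirect_rss(uint16_t port_id)
{
        static const uint16_t queues[] = { 0, 1 };
        const struct rte_flow_action_rss rss = {
                .level = 0,
                .types = ETH_RSS_IP,
                .queue_num = RTE_DIM(queues),
                .queue = queues,
        };
        /* Indirect actions carry their own ingress/egress/transfer bits. */
        const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };
        struct rte_flow_error error;

        return rte_flow_action_handle_create(port_id, &conf, &action, &error);
}
#endif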

/**
 * Validate meter policy actions.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] actions
 *   Array of per-color action lists to validate.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] is_rss
 *   Set to true if an RSS action is used in the policy.
 * @param[out] domain_bitmap
 *   Bitmap of the steering domains the policy can be applied to.
 * @param[out] is_def_policy
 *   Set to true if the policy matches the default one.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
                        const struct rte_flow_action *actions[RTE_COLORS],
                        struct rte_flow_attr *attr,
                        bool *is_rss,
                        uint8_t *domain_bitmap,
                        bool *is_def_policy,
                        struct rte_mtr_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *dev_conf = &priv->config;
        const struct rte_flow_action *act;
        uint64_t action_flags = 0;
        int actions_n;
        int i, ret;
        struct rte_flow_error flow_err;
        uint8_t domain_color[RTE_COLORS] = {0};
        uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;

        if (!priv->config.dv_esw_en)
                def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
        *domain_bitmap = def_domain;
        if (actions[RTE_COLOR_YELLOW] &&
                actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
                return -rte_mtr_error_set(error, ENOTSUP,
                                RTE_MTR_ERROR_TYPE_METER_POLICY,
                                NULL,
                                "Yellow color does not support any action.");
        if (!actions[RTE_COLOR_RED] ||
                actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
                return -rte_mtr_error_set(error, ENOTSUP,
                                RTE_MTR_ERROR_TYPE_METER_POLICY,
                                NULL, "Red color only supports drop action.");
        /*
         * Check default policy actions:
         * Green/Yellow: no action, Red: drop action
         */
        if (!actions[RTE_COLOR_GREEN] ||
                actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END) {
                *is_def_policy = true;
                return 0;
        }
        flow_err.message = NULL;
        for (i = 0; i < RTE_COLORS; i++) {
                act = actions[i];
                for (action_flags = 0, actions_n = 0;
                        act && act->type != RTE_FLOW_ACTION_TYPE_END;
                        act++) {
                        if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, "too many actions");
                        switch (act->type) {
                        case RTE_FLOW_ACTION_TYPE_PORT_ID:
                                if (!priv->config.dv_esw_en)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "PORT action is not supported"
                                        " when E-Switch is disabled");
                                ret = flow_dv_validate_action_port_id(dev,
                                                action_flags,
                                                act, attr, &flow_err);
                                if (ret)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, flow_err.message ?
                                        flow_err.message :
                                        "PORT action validation failed");
                                ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_PORT_ID;
                                break;
                        case RTE_FLOW_ACTION_TYPE_MARK:
                                ret = flow_dv_validate_action_mark(dev, act,
                                                           action_flags,
                                                           attr, &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, flow_err.message ?
                                        flow_err.message :
                                        "Mark action validation failed");
                                if (dev_conf->dv_xmeta_en !=
                                        MLX5_XMETA_MODE_LEGACY)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "Extended MARK action is "
                                        "not supported. Please try the "
                                        "default meter policy.");
                                action_flags |= MLX5_FLOW_ACTION_MARK;
                                ++actions_n;
                                break;
                        case RTE_FLOW_ACTION_TYPE_SET_TAG:
                                ret = flow_dv_validate_action_set_tag(dev,
                                                        act, action_flags,
                                                        attr, &flow_err);
                                if (ret)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, flow_err.message ?
                                        flow_err.message :
                                        "Set tag action validation failed");
                                /*
                                 * Count all modify-header actions
                                 * as one action.
                                 */
                                if (!(action_flags &
                                        MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                        ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_SET_TAG;
                                break;
                        case RTE_FLOW_ACTION_TYPE_DROP:
                                ret = mlx5_flow_validate_action_drop
                                        (action_flags,
                                        attr, &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, flow_err.message ?
                                        flow_err.message :
                                        "Drop action validation failed");
                                action_flags |= MLX5_FLOW_ACTION_DROP;
                                ++actions_n;
                                break;
                        case RTE_FLOW_ACTION_TYPE_QUEUE:
                                /*
                                 * Check whether the extensive
                                 * metadata feature is engaged.
                                 */
                                if (dev_conf->dv_flow_en &&
                                        (dev_conf->dv_xmeta_en !=
                                        MLX5_XMETA_MODE_LEGACY) &&
                                        mlx5_flow_ext_mreg_supported(dev))
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, "Queue action with meta "
                                          "is not supported. Please try the "
                                          "default meter policy.");
                                ret = mlx5_flow_validate_action_queue(act,
                                                        action_flags, dev,
                                                        attr, &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, flow_err.message ?
                                          flow_err.message :
                                          "Queue action validation failed");
                                action_flags |= MLX5_FLOW_ACTION_QUEUE;
                                ++actions_n;
                                break;
                        case RTE_FLOW_ACTION_TYPE_RSS:
                                if (dev_conf->dv_flow_en &&
                                        (dev_conf->dv_xmeta_en !=
                                        MLX5_XMETA_MODE_LEGACY) &&
                                        mlx5_flow_ext_mreg_supported(dev))
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, "RSS action with meta "
                                          "is not supported. Please try the "
                                          "default meter policy.");
                                ret = mlx5_validate_action_rss(dev, act,
                                                &flow_err);
                                if (ret < 0)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, flow_err.message ?
                                          flow_err.message :
                                          "RSS action validation failed");
                                action_flags |= MLX5_FLOW_ACTION_RSS;
                                ++actions_n;
                                *is_rss = true;
                                break;
                        case RTE_FLOW_ACTION_TYPE_JUMP:
                                ret = flow_dv_validate_action_jump(dev,
                                        NULL, act, action_flags,
                                        attr, true, &flow_err);
                                if (ret)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, flow_err.message ?
                                          flow_err.message :
                                          "Jump action validation failed");
                                ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_JUMP;
                                break;
                        default:
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL,
                                        "Unsupported action in meter policy");
                        }
                }
                /* Yellow is not supported, just skip. */
                if (i == RTE_COLOR_YELLOW)
                        continue;
                if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                        domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
                else if ((action_flags &
                        (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
                        (action_flags & MLX5_FLOW_ACTION_MARK))
                        /*
                         * Only MLX5_XMETA_MODE_LEGACY is supported, so the
                         * MARK action is allowed in the ingress domain only.
                         */
                        domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
                else
                        domain_color[i] = def_domain;
                /*
                 * Validate the drop action mutual exclusion
                 * with other actions. Drop action is mutually-exclusive
                 * with any other action.
                 */
                if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
                        (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
                        return -rte_mtr_error_set(error, ENOTSUP,
                                RTE_MTR_ERROR_TYPE_METER_POLICY,
                                NULL, "Drop action is mutually-exclusive "
                                "with any other action");
                }
                /* E-Switch has a few restrictions on using items and actions */
                if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
                        if (!mlx5_flow_ext_mreg_supported(dev) &&
                                action_flags & MLX5_FLOW_ACTION_MARK)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action MARK");
                        if (action_flags & MLX5_FLOW_ACTION_QUEUE)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action QUEUE");
                        if (action_flags & MLX5_FLOW_ACTION_RSS)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action RSS");
                        if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "no fate action is found");
                } else {
                        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
                                (domain_color[i] &
                                MLX5_MTR_DOMAIN_INGRESS_BIT)) {
                                if ((domain_color[i] &
                                        MLX5_MTR_DOMAIN_EGRESS_BIT))
                                        domain_color[i] =
                                        MLX5_MTR_DOMAIN_EGRESS_BIT;
                                else
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "no fate action is found");
                        }
                }
                if (domain_color[i] != def_domain)
                        *domain_bitmap = domain_color[i];
        }
        return 0;
}
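
/*
 * Usage sketch (documentation only): a policy shape that the validation
 * above accepts. Green forwards to a queue, yellow carries no actions and
 * red drops, matching the per-color rules enforced here. The port, policy
 * and queue identifiers are illustrative, and the
 * MLX5_FLOW_DV_DOC_EXAMPLES guard is hypothetical and never defined.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static int
example_meter_policy(uint16_t port_id, uint32_t policy_id)
{
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action green_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action red_acts[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_mtr_meter_policy_params policy = {
                .actions = {
                        [RTE_COLOR_GREEN] = green_acts,
                        [RTE_COLOR_YELLOW] = NULL,
                        [RTE_COLOR_RED] = red_acts,
                },
        };
        struct rte_mtr_error error;

        return rte_mtr_meter_policy_add(port_id, policy_id, &policy, &error);
}
#endif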

static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret = 0;

        if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
                                                flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
                if (ret != 0)
                        return ret;
        }
        return 0;
}
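
/*
 * Usage sketch (documentation only): applications reach the handler above
 * through the PMD-specific rte_pmd_mlx5_sync_flow() call, asking the
 * driver to flush pending hardware steering writes for the selected
 * domains. The MLX5_FLOW_DV_DOC_EXAMPLES guard is hypothetical and never
 * defined.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static int
example_sync_all_domains(uint16_t port_id)
{
        /* Synchronize the NIC RX, NIC TX and FDB steering domains. */
        return rte_pmd_mlx5_sync_flow(port_id,
                                      MLX5_DOMAIN_BIT_NIC_RX |
                                      MLX5_DOMAIN_BIT_NIC_TX |
                                      MLX5_DOMAIN_BIT_FDB);
}
#endif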

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
        .query = flow_dv_query,
        .create_mtr_tbls = flow_dv_create_mtr_tbls,
        .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
        .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
        .create_meter = flow_dv_mtr_alloc,
        .free_meter = flow_dv_aso_mtr_release_to_pool,
        .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
        .create_mtr_acts = flow_dv_create_mtr_policy_acts,
        .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
        .create_policy_rules = flow_dv_create_policy_rules,
        .destroy_policy_rules = flow_dv_destroy_policy_rules,
        .create_def_policy = flow_dv_create_def_policy,
        .destroy_def_policy = flow_dv_destroy_def_policy,
        .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
        .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
        .counter_alloc = flow_dv_counter_allocate,
        .counter_free = flow_dv_counter_free,
        .counter_query = flow_dv_counter_query,
        .get_aged_flows = flow_get_aged_flows,
        .action_validate = flow_dv_action_validate,
        .action_create = flow_dv_action_create,
        .action_destroy = flow_dv_action_destroy,
        .action_update = flow_dv_action_update,
        .action_query = flow_dv_action_query,
        .sync_domain = flow_dv_sync_domain,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */