768d5b15b0ba69a990dd1c42a45ec8a568b6cb63
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24 #include <rte_mtr.h>
25 #include <rte_mtr_driver.h>
26 #include <rte_tailq.h>
27
28 #include <mlx5_glue.h>
29 #include <mlx5_devx_cmds.h>
30 #include <mlx5_prm.h>
31 #include <mlx5_malloc.h>
32
33 #include "mlx5_defs.h"
34 #include "mlx5.h"
35 #include "mlx5_common_os.h"
36 #include "mlx5_flow.h"
37 #include "mlx5_flow_os.h"
38 #include "mlx5_rx.h"
39 #include "mlx5_tx.h"
40 #include "rte_pmd_mlx5.h"
41
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

/* Compatibility stub: DevX flow counters missing from rdma-core headers. */
#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

/* Compatibility stub: FDB table type when E-Switch DR support is absent. */
#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

/* Compatibility stub: root-level action flag when DR support is absent. */
#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13 /* PCP is the top 3 bits of the TCI. */
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff /* VID is the low 12 bits of the TCI. */
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
64
/*
 * Flow attributes deduced from the flow items. Used by the modify-header
 * converters below to pick the proper protocol header (IPv4 vs IPv6,
 * TCP vs UDP) to generate commands for.
 */
union flow_dv_attr {
        struct {
                uint32_t valid:1;    /* Set once attributes are initialized. */
                uint32_t ipv4:1;     /* Outermost L3 is IPv4. */
                uint32_t ipv6:1;     /* Outermost L3 is IPv6. */
                uint32_t tcp:1;      /* Outermost L4 is TCP. */
                uint32_t udp:1;      /* Outermost L4 is UDP. */
                uint32_t reserved:27;
        };
        uint32_t attr; /* Aggregate view, used to reset all flags at once. */
};
76
/* Release a flow table resource; defined later in this file. */
static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

/* Release an encap/decap resource by its index; defined later. */
static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

/* Release a port-id action resource by its index; defined later. */
static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
/* Release a shared RSS action by its index; defined later. */
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

/* Release a jump-to-table resource by its index; defined later. */
static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);
95 /**
96  * Initialize flow attributes structure according to flow items' types.
97  *
98  * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
99  * mode. For tunnel mode, the items to be modified are the outermost ones.
100  *
101  * @param[in] item
102  *   Pointer to item specification.
103  * @param[out] attr
104  *   Pointer to flow attributes structure.
105  * @param[in] dev_flow
106  *   Pointer to the sub flow.
107  * @param[in] tunnel_decap
108  *   Whether action is after tunnel decapsulation.
109  */
110 static void
111 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
112                   struct mlx5_flow *dev_flow, bool tunnel_decap)
113 {
114         uint64_t layers = dev_flow->handle->layers;
115
116         /*
117          * If layers is already initialized, it means this dev_flow is the
118          * suffix flow, the layers flags is set by the prefix flow. Need to
119          * use the layer flags from prefix flow as the suffix flow may not
120          * have the user defined items as the flow is split.
121          */
122         if (layers) {
123                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
124                         attr->ipv4 = 1;
125                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
126                         attr->ipv6 = 1;
127                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
128                         attr->tcp = 1;
129                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
130                         attr->udp = 1;
131                 attr->valid = 1;
132                 return;
133         }
134         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
135                 uint8_t next_protocol = 0xff;
136                 switch (item->type) {
137                 case RTE_FLOW_ITEM_TYPE_GRE:
138                 case RTE_FLOW_ITEM_TYPE_NVGRE:
139                 case RTE_FLOW_ITEM_TYPE_VXLAN:
140                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
141                 case RTE_FLOW_ITEM_TYPE_GENEVE:
142                 case RTE_FLOW_ITEM_TYPE_MPLS:
143                         if (tunnel_decap)
144                                 attr->attr = 0;
145                         break;
146                 case RTE_FLOW_ITEM_TYPE_IPV4:
147                         if (!attr->ipv6)
148                                 attr->ipv4 = 1;
149                         if (item->mask != NULL &&
150                             ((const struct rte_flow_item_ipv4 *)
151                             item->mask)->hdr.next_proto_id)
152                                 next_protocol =
153                                     ((const struct rte_flow_item_ipv4 *)
154                                       (item->spec))->hdr.next_proto_id &
155                                     ((const struct rte_flow_item_ipv4 *)
156                                       (item->mask))->hdr.next_proto_id;
157                         if ((next_protocol == IPPROTO_IPIP ||
158                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
159                                 attr->attr = 0;
160                         break;
161                 case RTE_FLOW_ITEM_TYPE_IPV6:
162                         if (!attr->ipv4)
163                                 attr->ipv6 = 1;
164                         if (item->mask != NULL &&
165                             ((const struct rte_flow_item_ipv6 *)
166                             item->mask)->hdr.proto)
167                                 next_protocol =
168                                     ((const struct rte_flow_item_ipv6 *)
169                                       (item->spec))->hdr.proto &
170                                     ((const struct rte_flow_item_ipv6 *)
171                                       (item->mask))->hdr.proto;
172                         if ((next_protocol == IPPROTO_IPIP ||
173                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
174                                 attr->attr = 0;
175                         break;
176                 case RTE_FLOW_ITEM_TYPE_UDP:
177                         if (!attr->tcp)
178                                 attr->udp = 1;
179                         break;
180                 case RTE_FLOW_ITEM_TYPE_TCP:
181                         if (!attr->udp)
182                                 attr->tcp = 1;
183                         break;
184                 default:
185                         break;
186                 }
187         }
188         attr->valid = 1;
189 }
190
191 /**
192  * Convert rte_mtr_color to mlx5 color.
193  *
194  * @param[in] rcol
195  *   rte_mtr_color.
196  *
197  * @return
198  *   mlx5 color.
199  */
200 static int
201 rte_col_2_mlx5_col(enum rte_color rcol)
202 {
203         switch (rcol) {
204         case RTE_COLOR_GREEN:
205                 return MLX5_FLOW_COLOR_GREEN;
206         case RTE_COLOR_YELLOW:
207                 return MLX5_FLOW_COLOR_YELLOW;
208         case RTE_COLOR_RED:
209                 return MLX5_FLOW_COLOR_RED;
210         default:
211                 break;
212         }
213         return MLX5_FLOW_COLOR_UNDEFINED;
214 }
215
struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id; /* Hardware field identifier. */
};

/* Ethernet header fields: 48-bit MACs split into 32+16 bit commands. */
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0}, /* Zero size terminates every table below. */
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! (unlike the byte sizes used elsewhere). */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

/* IPv4 header fields, offsets in bytes from the header start. */
struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

/* IPv6 header fields: 128-bit addresses split into four 32-bit commands. */
struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

/* UDP header fields. */
struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

/* TCP header fields. */
struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};
271
272 static const struct rte_flow_item *
273 mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
274 {
275         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
276                 switch (item->type) {
277                 default:
278                         break;
279                 case RTE_FLOW_ITEM_TYPE_VXLAN:
280                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
281                 case RTE_FLOW_ITEM_TYPE_GRE:
282                 case RTE_FLOW_ITEM_TYPE_MPLS:
283                 case RTE_FLOW_ITEM_TYPE_NVGRE:
284                 case RTE_FLOW_ITEM_TYPE_GENEVE:
285                         return item;
286                 case RTE_FLOW_ITEM_TYPE_IPV4:
287                 case RTE_FLOW_ITEM_TYPE_IPV6:
288                         if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
289                             item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
290                                 return item;
291                         break;
292                 }
293         }
294         return NULL;
295 }
296
297 static void
298 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
299                           uint8_t next_protocol, uint64_t *item_flags,
300                           int *tunnel)
301 {
302         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
303                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
304         if (next_protocol == IPPROTO_IPIP) {
305                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
306                 *tunnel = 1;
307         }
308         if (next_protocol == IPPROTO_IPV6) {
309                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
310                 *tunnel = 1;
311         }
312 }
313
314 /* Update VLAN's VID/PCP based on input rte_flow_action.
315  *
316  * @param[in] action
317  *   Pointer to struct rte_flow_action.
318  * @param[out] vlan
319  *   Pointer to struct rte_vlan_hdr.
320  */
321 static void
322 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
323                          struct rte_vlan_hdr *vlan)
324 {
325         uint16_t vlan_tci;
326         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
327                 vlan_tci =
328                     ((const struct rte_flow_action_of_set_vlan_pcp *)
329                                                action->conf)->vlan_pcp;
330                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
331                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
332                 vlan->vlan_tci |= vlan_tci;
333         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
334                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
335                 vlan->vlan_tci |= rte_be_to_cpu_16
336                     (((const struct rte_flow_action_of_set_vlan_vid *)
337                                              action->conf)->vlan_vid);
338         }
339 }
340
341 /**
342  * Fetch 1, 2, 3 or 4 byte field from the byte array
343  * and return as unsigned integer in host-endian format.
344  *
345  * @param[in] data
346  *   Pointer to data array.
347  * @param[in] size
348  *   Size of field to extract.
349  *
350  * @return
351  *   converted field in host endian format.
352  */
353 static inline uint32_t
354 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
355 {
356         uint32_t ret;
357
358         switch (size) {
359         case 1:
360                 ret = *data;
361                 break;
362         case 2:
363                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
364                 break;
365         case 3:
366                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
367                 ret = (ret << 8) | *(data + sizeof(uint16_t));
368                 break;
369         case 4:
370                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
371                 break;
372         default:
373                 MLX5_ASSERT(false);
374                 ret = 0;
375                 break;
376         }
377         return ret;
378 }
379
380 /**
381  * Convert modify-header action to DV specification.
382  *
383  * Data length of each action is determined by provided field description
384  * and the item mask. Data bit offset and width of each action is determined
385  * by provided item mask.
386  *
387  * @param[in] item
388  *   Pointer to item specification.
389  * @param[in] field
390  *   Pointer to field modification information.
391  *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
392  *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
393  *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
394  * @param[in] dcopy
395  *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
396  *   Negative offset value sets the same offset as source offset.
397  *   size field is ignored, value is taken from source field.
398  * @param[in,out] resource
399  *   Pointer to the modify-header resource.
400  * @param[in] type
401  *   Type of modification.
402  * @param[out] error
403  *   Pointer to the error structure.
404  *
405  * @return
406  *   0 on success, a negative errno value otherwise and rte_errno is set.
407  */
408 static int
409 flow_dv_convert_modify_action(struct rte_flow_item *item,
410                               struct field_modify_info *field,
411                               struct field_modify_info *dcopy,
412                               struct mlx5_flow_dv_modify_hdr_resource *resource,
413                               uint32_t type, struct rte_flow_error *error)
414 {
415         uint32_t i = resource->actions_num;
416         struct mlx5_modification_cmd *actions = resource->actions;
417         uint32_t carry_b = 0;
418
419         /*
420          * The item and mask are provided in big-endian format.
421          * The fields should be presented as in big-endian format either.
422          * Mask must be always present, it defines the actual field width.
423          */
424         MLX5_ASSERT(item->mask);
425         MLX5_ASSERT(field->size);
426         do {
427                 uint32_t size_b;
428                 uint32_t off_b;
429                 uint32_t mask;
430                 uint32_t data;
431                 bool next_field = true;
432                 bool next_dcopy = true;
433
434                 if (i >= MLX5_MAX_MODIFY_NUM)
435                         return rte_flow_error_set(error, EINVAL,
436                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
437                                  "too many items to modify");
438                 /* Fetch variable byte size mask from the array. */
439                 mask = flow_dv_fetch_field((const uint8_t *)item->mask +
440                                            field->offset, field->size);
441                 if (!mask) {
442                         ++field;
443                         continue;
444                 }
445                 /* Deduce actual data width in bits from mask value. */
446                 off_b = rte_bsf32(mask) + carry_b;
447                 size_b = sizeof(uint32_t) * CHAR_BIT -
448                          off_b - __builtin_clz(mask);
449                 MLX5_ASSERT(size_b);
450                 actions[i] = (struct mlx5_modification_cmd) {
451                         .action_type = type,
452                         .field = field->id,
453                         .offset = off_b,
454                         .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
455                                 0 : size_b,
456                 };
457                 if (type == MLX5_MODIFICATION_TYPE_COPY) {
458                         MLX5_ASSERT(dcopy);
459                         actions[i].dst_field = dcopy->id;
460                         actions[i].dst_offset =
461                                 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
462                         /* Convert entire record to big-endian format. */
463                         actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
464                         /*
465                          * Destination field overflow. Copy leftovers of
466                          * a source field to the next destination field.
467                          */
468                         carry_b = 0;
469                         if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
470                             dcopy->size != 0) {
471                                 actions[i].length =
472                                         dcopy->size * CHAR_BIT - dcopy->offset;
473                                 carry_b = actions[i].length;
474                                 next_field = false;
475                         }
476                         /*
477                          * Not enough bits in a source filed to fill a
478                          * destination field. Switch to the next source.
479                          */
480                         if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
481                             (size_b == field->size * CHAR_BIT - off_b)) {
482                                 actions[i].length =
483                                         field->size * CHAR_BIT - off_b;
484                                 dcopy->offset += actions[i].length;
485                                 next_dcopy = false;
486                         }
487                         if (next_dcopy)
488                                 ++dcopy;
489                 } else {
490                         MLX5_ASSERT(item->spec);
491                         data = flow_dv_fetch_field((const uint8_t *)item->spec +
492                                                    field->offset, field->size);
493                         /* Shift out the trailing masked bits from data. */
494                         data = (data & mask) >> off_b;
495                         actions[i].data1 = rte_cpu_to_be_32(data);
496                 }
497                 /* Convert entire record to expected big-endian format. */
498                 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
499                 if (next_field)
500                         ++field;
501                 ++i;
502         } while (field->size);
503         if (resource->actions_num == i)
504                 return rte_flow_error_set(error, EINVAL,
505                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
506                                           "invalid modification flow item");
507         resource->actions_num = i;
508         return 0;
509 }
510
511 /**
512  * Convert modify-header set IPv4 address action to DV specification.
513  *
514  * @param[in,out] resource
515  *   Pointer to the modify-header resource.
516  * @param[in] action
517  *   Pointer to action specification.
518  * @param[out] error
519  *   Pointer to the error structure.
520  *
521  * @return
522  *   0 on success, a negative errno value otherwise and rte_errno is set.
523  */
524 static int
525 flow_dv_convert_action_modify_ipv4
526                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
527                          const struct rte_flow_action *action,
528                          struct rte_flow_error *error)
529 {
530         const struct rte_flow_action_set_ipv4 *conf =
531                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
532         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
533         struct rte_flow_item_ipv4 ipv4;
534         struct rte_flow_item_ipv4 ipv4_mask;
535
536         memset(&ipv4, 0, sizeof(ipv4));
537         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
538         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
539                 ipv4.hdr.src_addr = conf->ipv4_addr;
540                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
541         } else {
542                 ipv4.hdr.dst_addr = conf->ipv4_addr;
543                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
544         }
545         item.spec = &ipv4;
546         item.mask = &ipv4_mask;
547         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
548                                              MLX5_MODIFICATION_TYPE_SET, error);
549 }
550
551 /**
552  * Convert modify-header set IPv6 address action to DV specification.
553  *
554  * @param[in,out] resource
555  *   Pointer to the modify-header resource.
556  * @param[in] action
557  *   Pointer to action specification.
558  * @param[out] error
559  *   Pointer to the error structure.
560  *
561  * @return
562  *   0 on success, a negative errno value otherwise and rte_errno is set.
563  */
564 static int
565 flow_dv_convert_action_modify_ipv6
566                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
567                          const struct rte_flow_action *action,
568                          struct rte_flow_error *error)
569 {
570         const struct rte_flow_action_set_ipv6 *conf =
571                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
572         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
573         struct rte_flow_item_ipv6 ipv6;
574         struct rte_flow_item_ipv6 ipv6_mask;
575
576         memset(&ipv6, 0, sizeof(ipv6));
577         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
578         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
579                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
580                        sizeof(ipv6.hdr.src_addr));
581                 memcpy(&ipv6_mask.hdr.src_addr,
582                        &rte_flow_item_ipv6_mask.hdr.src_addr,
583                        sizeof(ipv6.hdr.src_addr));
584         } else {
585                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
586                        sizeof(ipv6.hdr.dst_addr));
587                 memcpy(&ipv6_mask.hdr.dst_addr,
588                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
589                        sizeof(ipv6.hdr.dst_addr));
590         }
591         item.spec = &ipv6;
592         item.mask = &ipv6_mask;
593         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
594                                              MLX5_MODIFICATION_TYPE_SET, error);
595 }
596
597 /**
598  * Convert modify-header set MAC address action to DV specification.
599  *
600  * @param[in,out] resource
601  *   Pointer to the modify-header resource.
602  * @param[in] action
603  *   Pointer to action specification.
604  * @param[out] error
605  *   Pointer to the error structure.
606  *
607  * @return
608  *   0 on success, a negative errno value otherwise and rte_errno is set.
609  */
610 static int
611 flow_dv_convert_action_modify_mac
612                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
613                          const struct rte_flow_action *action,
614                          struct rte_flow_error *error)
615 {
616         const struct rte_flow_action_set_mac *conf =
617                 (const struct rte_flow_action_set_mac *)(action->conf);
618         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
619         struct rte_flow_item_eth eth;
620         struct rte_flow_item_eth eth_mask;
621
622         memset(&eth, 0, sizeof(eth));
623         memset(&eth_mask, 0, sizeof(eth_mask));
624         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
625                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
626                        sizeof(eth.src.addr_bytes));
627                 memcpy(&eth_mask.src.addr_bytes,
628                        &rte_flow_item_eth_mask.src.addr_bytes,
629                        sizeof(eth_mask.src.addr_bytes));
630         } else {
631                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
632                        sizeof(eth.dst.addr_bytes));
633                 memcpy(&eth_mask.dst.addr_bytes,
634                        &rte_flow_item_eth_mask.dst.addr_bytes,
635                        sizeof(eth_mask.dst.addr_bytes));
636         }
637         item.spec = &eth;
638         item.mask = &eth_mask;
639         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
640                                              MLX5_MODIFICATION_TYPE_SET, error);
641 }
642
643 /**
644  * Convert modify-header set VLAN VID action to DV specification.
645  *
646  * @param[in,out] resource
647  *   Pointer to the modify-header resource.
648  * @param[in] action
649  *   Pointer to action specification.
650  * @param[out] error
651  *   Pointer to the error structure.
652  *
653  * @return
654  *   0 on success, a negative errno value otherwise and rte_errno is set.
655  */
656 static int
657 flow_dv_convert_action_modify_vlan_vid
658                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
659                          const struct rte_flow_action *action,
660                          struct rte_flow_error *error)
661 {
662         const struct rte_flow_action_of_set_vlan_vid *conf =
663                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
664         int i = resource->actions_num;
665         struct mlx5_modification_cmd *actions = resource->actions;
666         struct field_modify_info *field = modify_vlan_out_first_vid;
667
668         if (i >= MLX5_MAX_MODIFY_NUM)
669                 return rte_flow_error_set(error, EINVAL,
670                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
671                          "too many items to modify");
672         actions[i] = (struct mlx5_modification_cmd) {
673                 .action_type = MLX5_MODIFICATION_TYPE_SET,
674                 .field = field->id,
675                 .length = field->size,
676                 .offset = field->offset,
677         };
678         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
679         actions[i].data1 = conf->vlan_vid;
680         actions[i].data1 = actions[i].data1 << 16;
681         resource->actions_num = ++i;
682         return 0;
683 }
684
685 /**
686  * Convert modify-header set TP action to DV specification.
687  *
688  * @param[in,out] resource
689  *   Pointer to the modify-header resource.
690  * @param[in] action
691  *   Pointer to action specification.
692  * @param[in] items
693  *   Pointer to rte_flow_item objects list.
694  * @param[in] attr
695  *   Pointer to flow attributes structure.
696  * @param[in] dev_flow
697  *   Pointer to the sub flow.
698  * @param[in] tunnel_decap
699  *   Whether action is after tunnel decapsulation.
700  * @param[out] error
701  *   Pointer to the error structure.
702  *
703  * @return
704  *   0 on success, a negative errno value otherwise and rte_errno is set.
705  */
706 static int
707 flow_dv_convert_action_modify_tp
708                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
709                          const struct rte_flow_action *action,
710                          const struct rte_flow_item *items,
711                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
712                          bool tunnel_decap, struct rte_flow_error *error)
713 {
714         const struct rte_flow_action_set_tp *conf =
715                 (const struct rte_flow_action_set_tp *)(action->conf);
716         struct rte_flow_item item;
717         struct rte_flow_item_udp udp;
718         struct rte_flow_item_udp udp_mask;
719         struct rte_flow_item_tcp tcp;
720         struct rte_flow_item_tcp tcp_mask;
721         struct field_modify_info *field;
722
723         if (!attr->valid)
724                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
725         if (attr->udp) {
726                 memset(&udp, 0, sizeof(udp));
727                 memset(&udp_mask, 0, sizeof(udp_mask));
728                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
729                         udp.hdr.src_port = conf->port;
730                         udp_mask.hdr.src_port =
731                                         rte_flow_item_udp_mask.hdr.src_port;
732                 } else {
733                         udp.hdr.dst_port = conf->port;
734                         udp_mask.hdr.dst_port =
735                                         rte_flow_item_udp_mask.hdr.dst_port;
736                 }
737                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
738                 item.spec = &udp;
739                 item.mask = &udp_mask;
740                 field = modify_udp;
741         } else {
742                 MLX5_ASSERT(attr->tcp);
743                 memset(&tcp, 0, sizeof(tcp));
744                 memset(&tcp_mask, 0, sizeof(tcp_mask));
745                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
746                         tcp.hdr.src_port = conf->port;
747                         tcp_mask.hdr.src_port =
748                                         rte_flow_item_tcp_mask.hdr.src_port;
749                 } else {
750                         tcp.hdr.dst_port = conf->port;
751                         tcp_mask.hdr.dst_port =
752                                         rte_flow_item_tcp_mask.hdr.dst_port;
753                 }
754                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
755                 item.spec = &tcp;
756                 item.mask = &tcp_mask;
757                 field = modify_tcp;
758         }
759         return flow_dv_convert_modify_action(&item, field, NULL, resource,
760                                              MLX5_MODIFICATION_TYPE_SET, error);
761 }
762
763 /**
764  * Convert modify-header set TTL action to DV specification.
765  *
766  * @param[in,out] resource
767  *   Pointer to the modify-header resource.
768  * @param[in] action
769  *   Pointer to action specification.
770  * @param[in] items
771  *   Pointer to rte_flow_item objects list.
772  * @param[in] attr
773  *   Pointer to flow attributes structure.
774  * @param[in] dev_flow
775  *   Pointer to the sub flow.
776  * @param[in] tunnel_decap
777  *   Whether action is after tunnel decapsulation.
778  * @param[out] error
779  *   Pointer to the error structure.
780  *
781  * @return
782  *   0 on success, a negative errno value otherwise and rte_errno is set.
783  */
784 static int
785 flow_dv_convert_action_modify_ttl
786                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
787                          const struct rte_flow_action *action,
788                          const struct rte_flow_item *items,
789                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
790                          bool tunnel_decap, struct rte_flow_error *error)
791 {
792         const struct rte_flow_action_set_ttl *conf =
793                 (const struct rte_flow_action_set_ttl *)(action->conf);
794         struct rte_flow_item item;
795         struct rte_flow_item_ipv4 ipv4;
796         struct rte_flow_item_ipv4 ipv4_mask;
797         struct rte_flow_item_ipv6 ipv6;
798         struct rte_flow_item_ipv6 ipv6_mask;
799         struct field_modify_info *field;
800
801         if (!attr->valid)
802                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
803         if (attr->ipv4) {
804                 memset(&ipv4, 0, sizeof(ipv4));
805                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
806                 ipv4.hdr.time_to_live = conf->ttl_value;
807                 ipv4_mask.hdr.time_to_live = 0xFF;
808                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
809                 item.spec = &ipv4;
810                 item.mask = &ipv4_mask;
811                 field = modify_ipv4;
812         } else {
813                 MLX5_ASSERT(attr->ipv6);
814                 memset(&ipv6, 0, sizeof(ipv6));
815                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
816                 ipv6.hdr.hop_limits = conf->ttl_value;
817                 ipv6_mask.hdr.hop_limits = 0xFF;
818                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
819                 item.spec = &ipv6;
820                 item.mask = &ipv6_mask;
821                 field = modify_ipv6;
822         }
823         return flow_dv_convert_modify_action(&item, field, NULL, resource,
824                                              MLX5_MODIFICATION_TYPE_SET, error);
825 }
826
827 /**
828  * Convert modify-header decrement TTL action to DV specification.
829  *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
836  * @param[in] attr
837  *   Pointer to flow attributes structure.
838  * @param[in] dev_flow
839  *   Pointer to the sub flow.
840  * @param[in] tunnel_decap
841  *   Whether action is after tunnel decapsulation.
842  * @param[out] error
843  *   Pointer to the error structure.
844  *
845  * @return
846  *   0 on success, a negative errno value otherwise and rte_errno is set.
847  */
848 static int
849 flow_dv_convert_action_modify_dec_ttl
850                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
851                          const struct rte_flow_item *items,
852                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
853                          bool tunnel_decap, struct rte_flow_error *error)
854 {
855         struct rte_flow_item item;
856         struct rte_flow_item_ipv4 ipv4;
857         struct rte_flow_item_ipv4 ipv4_mask;
858         struct rte_flow_item_ipv6 ipv6;
859         struct rte_flow_item_ipv6 ipv6_mask;
860         struct field_modify_info *field;
861
862         if (!attr->valid)
863                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
864         if (attr->ipv4) {
865                 memset(&ipv4, 0, sizeof(ipv4));
866                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
867                 ipv4.hdr.time_to_live = 0xFF;
868                 ipv4_mask.hdr.time_to_live = 0xFF;
869                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
870                 item.spec = &ipv4;
871                 item.mask = &ipv4_mask;
872                 field = modify_ipv4;
873         } else {
874                 MLX5_ASSERT(attr->ipv6);
875                 memset(&ipv6, 0, sizeof(ipv6));
876                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
877                 ipv6.hdr.hop_limits = 0xFF;
878                 ipv6_mask.hdr.hop_limits = 0xFF;
879                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
880                 item.spec = &ipv6;
881                 item.mask = &ipv6_mask;
882                 field = modify_ipv6;
883         }
884         return flow_dv_convert_modify_action(&item, field, NULL, resource,
885                                              MLX5_MODIFICATION_TYPE_ADD, error);
886 }
887
888 /**
889  * Convert modify-header increment/decrement TCP Sequence number
890  * to DV specification.
891  *
892  * @param[in,out] resource
893  *   Pointer to the modify-header resource.
894  * @param[in] action
895  *   Pointer to action specification.
896  * @param[out] error
897  *   Pointer to the error structure.
898  *
899  * @return
900  *   0 on success, a negative errno value otherwise and rte_errno is set.
901  */
902 static int
903 flow_dv_convert_action_modify_tcp_seq
904                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
905                          const struct rte_flow_action *action,
906                          struct rte_flow_error *error)
907 {
908         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
909         uint64_t value = rte_be_to_cpu_32(*conf);
910         struct rte_flow_item item;
911         struct rte_flow_item_tcp tcp;
912         struct rte_flow_item_tcp tcp_mask;
913
914         memset(&tcp, 0, sizeof(tcp));
915         memset(&tcp_mask, 0, sizeof(tcp_mask));
916         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
917                 /*
918                  * The HW has no decrement operation, only increment operation.
919                  * To simulate decrement X from Y using increment operation
920                  * we need to add UINT32_MAX X times to Y.
921                  * Each adding of UINT32_MAX decrements Y by 1.
922                  */
923                 value *= UINT32_MAX;
924         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
925         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
926         item.type = RTE_FLOW_ITEM_TYPE_TCP;
927         item.spec = &tcp;
928         item.mask = &tcp_mask;
929         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
930                                              MLX5_MODIFICATION_TYPE_ADD, error);
931 }
932
933 /**
934  * Convert modify-header increment/decrement TCP Acknowledgment number
935  * to DV specification.
936  *
937  * @param[in,out] resource
938  *   Pointer to the modify-header resource.
939  * @param[in] action
940  *   Pointer to action specification.
941  * @param[out] error
942  *   Pointer to the error structure.
943  *
944  * @return
945  *   0 on success, a negative errno value otherwise and rte_errno is set.
946  */
947 static int
948 flow_dv_convert_action_modify_tcp_ack
949                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
950                          const struct rte_flow_action *action,
951                          struct rte_flow_error *error)
952 {
953         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
954         uint64_t value = rte_be_to_cpu_32(*conf);
955         struct rte_flow_item item;
956         struct rte_flow_item_tcp tcp;
957         struct rte_flow_item_tcp tcp_mask;
958
959         memset(&tcp, 0, sizeof(tcp));
960         memset(&tcp_mask, 0, sizeof(tcp_mask));
961         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
962                 /*
963                  * The HW has no decrement operation, only increment operation.
964                  * To simulate decrement X from Y using increment operation
965                  * we need to add UINT32_MAX X times to Y.
966                  * Each adding of UINT32_MAX decrements Y by 1.
967                  */
968                 value *= UINT32_MAX;
969         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
970         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
971         item.type = RTE_FLOW_ITEM_TYPE_TCP;
972         item.spec = &tcp;
973         item.mask = &tcp_mask;
974         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
975                                              MLX5_MODIFICATION_TYPE_ADD, error);
976 }
977
/*
 * Map the driver's logical metadata register ids (enum modify_reg)
 * to the hardware field ids used in modify-header commands.
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
991
992 /**
993  * Convert register set to DV specification.
994  *
995  * @param[in,out] resource
996  *   Pointer to the modify-header resource.
997  * @param[in] action
998  *   Pointer to action specification.
999  * @param[out] error
1000  *   Pointer to the error structure.
1001  *
1002  * @return
1003  *   0 on success, a negative errno value otherwise and rte_errno is set.
1004  */
static int
flow_dv_convert_action_set_reg
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	/* The resource can hold at most MLX5_MAX_MODIFY_NUM commands. */
	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NON);
	MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
	/* Build a SET command targeting the mapped hardware register. */
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
		.offset = conf->offset,
		.length = conf->length,
	};
	/*
	 * Both command words are converted to big-endian in place;
	 * data0 packs type/field/offset/length, data1 is the value.
	 */
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = rte_cpu_to_be_32(conf->data);
	++i;
	/* Commit the appended command. */
	resource->actions_num = i;
	return 0;
}
1033
1034 /**
1035  * Convert SET_TAG action to DV specification.
1036  *
1037  * @param[in] dev
1038  *   Pointer to the rte_eth_dev structure.
1039  * @param[in,out] resource
1040  *   Pointer to the modify-header resource.
1041  * @param[in] conf
1042  *   Pointer to action specification.
1043  * @param[out] error
1044  *   Pointer to the error structure.
1045  *
1046  * @return
1047  *   0 on success, a negative errno value otherwise and rte_errno is set.
1048  */
1049 static int
1050 flow_dv_convert_action_set_tag
1051                         (struct rte_eth_dev *dev,
1052                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1053                          const struct rte_flow_action_set_tag *conf,
1054                          struct rte_flow_error *error)
1055 {
1056         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1057         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1058         struct rte_flow_item item = {
1059                 .spec = &data,
1060                 .mask = &mask,
1061         };
1062         struct field_modify_info reg_c_x[] = {
1063                 [1] = {0, 0, 0},
1064         };
1065         enum mlx5_modification_field reg_type;
1066         int ret;
1067
1068         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1069         if (ret < 0)
1070                 return ret;
1071         MLX5_ASSERT(ret != REG_NON);
1072         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1073         reg_type = reg_to_field[ret];
1074         MLX5_ASSERT(reg_type > 0);
1075         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1076         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1077                                              MLX5_MODIFICATION_TYPE_SET, error);
1078 }
1079
1080 /**
1081  * Convert internal COPY_REG action to DV specification.
1082  *
1083  * @param[in] dev
1084  *   Pointer to the rte_eth_dev structure.
1085  * @param[in,out] res
1086  *   Pointer to the modify-header resource.
1087  * @param[in] action
1088  *   Pointer to action specification.
1089  * @param[out] error
1090  *   Pointer to the error structure.
1091  *
1092  * @return
1093  *   0 on success, a negative errno value otherwise and rte_errno is set.
1094  */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	/* Source: one full 32-bit register; second entry terminates. */
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		/* Bits of reg_c[0] available to the PMD. */
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask is ignoring the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1155
1156 /**
1157  * Convert MARK action to DV specification. This routine is used
1158  * in extensive metadata only and requires metadata register to be
1159  * handled. In legacy mode hardware tag resource is engaged.
1160  *
1161  * @param[in] dev
1162  *   Pointer to the rte_eth_dev structure.
1163  * @param[in] conf
1164  *   Pointer to MARK action specification.
1165  * @param[in,out] resource
1166  *   Pointer to the modify-header resource.
1167  * @param[out] error
1168  *   Pointer to the error structure.
1169  *
1170  * @return
1171  *   0 on success, a negative errno value otherwise and rte_errno is set.
1172  */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Restrict the mark to the bits the device reported as usable. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},	/* Terminator. */
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	/* Resolve the metadata register carrying the MARK value. */
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		/*
		 * reg_c[0] is shared: only the dv_regc0_mask window is
		 * available. Convert to CPU order, shift value and mask
		 * into that window, then convert back to big-endian.
		 */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1212
1213 /**
1214  * Get metadata register index for specified steering domain.
1215  *
1216  * @param[in] dev
1217  *   Pointer to the rte_eth_dev structure.
1218  * @param[in] attr
1219  *   Attributes of flow to determine steering domain.
1220  * @param[out] error
1221  *   Pointer to the error structure.
1222  *
1223  * @return
1224  *   positive index on success, a negative errno value otherwise
1225  *   and rte_errno is set.
1226  */
1227 static enum modify_reg
1228 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1229                          const struct rte_flow_attr *attr,
1230                          struct rte_flow_error *error)
1231 {
1232         int reg =
1233                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1234                                           MLX5_METADATA_FDB :
1235                                             attr->egress ?
1236                                             MLX5_METADATA_TX :
1237                                             MLX5_METADATA_RX, 0, error);
1238         if (reg < 0)
1239                 return rte_flow_error_set(error,
1240                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1241                                           NULL, "unavailable "
1242                                           "metadata register");
1243         return reg;
1244 }
1245
1246 /**
1247  * Convert SET_META action to DV specification.
1248  *
1249  * @param[in] dev
1250  *   Pointer to the rte_eth_dev structure.
1251  * @param[in,out] resource
1252  *   Pointer to the modify-header resource.
1253  * @param[in] attr
1254  *   Attributes of flow that includes this item.
1255  * @param[in] conf
1256  *   Pointer to action specification.
1257  * @param[out] error
1258  *   Pointer to the error structure.
1259  *
1260  * @return
1261  *   0 on success, a negative errno value otherwise and rte_errno is set.
1262  */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	/* Data and mask are kept in big-endian layout from here on. */
	uint32_t mask = rte_cpu_to_be_32(conf->mask);
	uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},	/* Terminator. */
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	if (reg == REG_C_0) {
		/*
		 * reg_c[0] is shared: only the dv_regc0_mask window is
		 * available. Convert to CPU order, shift value and mask
		 * into that window, then convert back to big-endian.
		 */
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1299
1300 /**
1301  * Convert modify-header set IPv4 DSCP action to DV specification.
1302  *
1303  * @param[in,out] resource
1304  *   Pointer to the modify-header resource.
1305  * @param[in] action
1306  *   Pointer to action specification.
1307  * @param[out] error
1308  *   Pointer to the error structure.
1309  *
1310  * @return
1311  *   0 on success, a negative errno value otherwise and rte_errno is set.
1312  */
1313 static int
1314 flow_dv_convert_action_modify_ipv4_dscp
1315                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1316                          const struct rte_flow_action *action,
1317                          struct rte_flow_error *error)
1318 {
1319         const struct rte_flow_action_set_dscp *conf =
1320                 (const struct rte_flow_action_set_dscp *)(action->conf);
1321         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1322         struct rte_flow_item_ipv4 ipv4;
1323         struct rte_flow_item_ipv4 ipv4_mask;
1324
1325         memset(&ipv4, 0, sizeof(ipv4));
1326         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1327         ipv4.hdr.type_of_service = conf->dscp;
1328         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1329         item.spec = &ipv4;
1330         item.mask = &ipv4_mask;
1331         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1332                                              MLX5_MODIFICATION_TYPE_SET, error);
1333 }
1334
1335 /**
1336  * Convert modify-header set IPv6 DSCP action to DV specification.
1337  *
1338  * @param[in,out] resource
1339  *   Pointer to the modify-header resource.
1340  * @param[in] action
1341  *   Pointer to action specification.
1342  * @param[out] error
1343  *   Pointer to the error structure.
1344  *
1345  * @return
1346  *   0 on success, a negative errno value otherwise and rte_errno is set.
1347  */
1348 static int
1349 flow_dv_convert_action_modify_ipv6_dscp
1350                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1351                          const struct rte_flow_action *action,
1352                          struct rte_flow_error *error)
1353 {
1354         const struct rte_flow_action_set_dscp *conf =
1355                 (const struct rte_flow_action_set_dscp *)(action->conf);
1356         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1357         struct rte_flow_item_ipv6 ipv6;
1358         struct rte_flow_item_ipv6 ipv6_mask;
1359
1360         memset(&ipv6, 0, sizeof(ipv6));
1361         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1362         /*
1363          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1364          * rdma-core only accept the DSCP bits byte aligned start from
1365          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1366          * bits in IPv6 case as rdma-core requires byte aligned value.
1367          */
1368         ipv6.hdr.vtc_flow = conf->dscp;
1369         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1370         item.spec = &ipv6;
1371         item.mask = &ipv6_mask;
1372         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1373                                              MLX5_MODIFICATION_TYPE_SET, error);
1374 }
1375
1376 static int
1377 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1378                            enum rte_flow_field_id field)
1379 {
1380         switch (field) {
1381         case RTE_FLOW_FIELD_START:
1382                 return 32;
1383         case RTE_FLOW_FIELD_MAC_DST:
1384         case RTE_FLOW_FIELD_MAC_SRC:
1385                 return 48;
1386         case RTE_FLOW_FIELD_VLAN_TYPE:
1387                 return 16;
1388         case RTE_FLOW_FIELD_VLAN_ID:
1389                 return 12;
1390         case RTE_FLOW_FIELD_MAC_TYPE:
1391                 return 16;
1392         case RTE_FLOW_FIELD_IPV4_DSCP:
1393                 return 6;
1394         case RTE_FLOW_FIELD_IPV4_TTL:
1395                 return 8;
1396         case RTE_FLOW_FIELD_IPV4_SRC:
1397         case RTE_FLOW_FIELD_IPV4_DST:
1398                 return 32;
1399         case RTE_FLOW_FIELD_IPV6_DSCP:
1400                 return 6;
1401         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1402                 return 8;
1403         case RTE_FLOW_FIELD_IPV6_SRC:
1404         case RTE_FLOW_FIELD_IPV6_DST:
1405                 return 128;
1406         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1407         case RTE_FLOW_FIELD_TCP_PORT_DST:
1408                 return 16;
1409         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1410         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1411                 return 32;
1412         case RTE_FLOW_FIELD_TCP_FLAGS:
1413                 return 9;
1414         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1415         case RTE_FLOW_FIELD_UDP_PORT_DST:
1416                 return 16;
1417         case RTE_FLOW_FIELD_VXLAN_VNI:
1418         case RTE_FLOW_FIELD_GENEVE_VNI:
1419                 return 24;
1420         case RTE_FLOW_FIELD_GTP_TEID:
1421         case RTE_FLOW_FIELD_TAG:
1422                 return 32;
1423         case RTE_FLOW_FIELD_MARK:
1424                 return 24;
1425         case RTE_FLOW_FIELD_META:
1426                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1427                         return 16;
1428                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1429                         return 32;
1430                 else
1431                         return 0;
1432         case RTE_FLOW_FIELD_POINTER:
1433         case RTE_FLOW_FIELD_VALUE:
1434                 return 64;
1435         default:
1436                 MLX5_ASSERT(false);
1437         }
1438         return 0;
1439 }
1440
1441 static void
1442 mlx5_flow_field_id_to_modify_info
1443                 (const struct rte_flow_action_modify_data *data,
1444                  struct field_modify_info *info,
1445                  uint32_t *mask, uint32_t *value,
1446                  uint32_t width, uint32_t dst_width,
1447                  struct rte_eth_dev *dev,
1448                  const struct rte_flow_attr *attr,
1449                  struct rte_flow_error *error)
1450 {
1451         struct mlx5_priv *priv = dev->data->dev_private;
1452         struct mlx5_dev_config *config = &priv->config;
1453         uint32_t idx = 0;
1454         uint32_t off = 0;
1455         uint64_t val = 0;
1456         switch (data->field) {
1457         case RTE_FLOW_FIELD_START:
1458                 /* not supported yet */
1459                 MLX5_ASSERT(false);
1460                 break;
1461         case RTE_FLOW_FIELD_MAC_DST:
1462                 off = data->offset > 16 ? data->offset - 16 : 0;
1463                 if (mask) {
1464                         if (data->offset < 16) {
1465                                 info[idx] = (struct field_modify_info){2, 0,
1466                                                 MLX5_MODI_OUT_DMAC_15_0};
1467                                 if (width < 16) {
1468                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1469                                                                  (16 - width));
1470                                         width = 0;
1471                                 } else {
1472                                         mask[idx] = RTE_BE16(0xffff);
1473                                         width -= 16;
1474                                 }
1475                                 if (!width)
1476                                         break;
1477                                 ++idx;
1478                         }
1479                         info[idx] = (struct field_modify_info){4, 4 * idx,
1480                                                 MLX5_MODI_OUT_DMAC_47_16};
1481                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1482                                                       (32 - width)) << off);
1483                 } else {
1484                         if (data->offset < 16)
1485                                 info[idx++] = (struct field_modify_info){2, 0,
1486                                                 MLX5_MODI_OUT_DMAC_15_0};
1487                         info[idx] = (struct field_modify_info){4, off,
1488                                                 MLX5_MODI_OUT_DMAC_47_16};
1489                 }
1490                 break;
1491         case RTE_FLOW_FIELD_MAC_SRC:
1492                 off = data->offset > 16 ? data->offset - 16 : 0;
1493                 if (mask) {
1494                         if (data->offset < 16) {
1495                                 info[idx] = (struct field_modify_info){2, 0,
1496                                                 MLX5_MODI_OUT_SMAC_15_0};
1497                                 if (width < 16) {
1498                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1499                                                                  (16 - width));
1500                                         width = 0;
1501                                 } else {
1502                                         mask[idx] = RTE_BE16(0xffff);
1503                                         width -= 16;
1504                                 }
1505                                 if (!width)
1506                                         break;
1507                                 ++idx;
1508                         }
1509                         info[idx] = (struct field_modify_info){4, 4 * idx,
1510                                                 MLX5_MODI_OUT_SMAC_47_16};
1511                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1512                                                       (32 - width)) << off);
1513                 } else {
1514                         if (data->offset < 16)
1515                                 info[idx++] = (struct field_modify_info){2, 0,
1516                                                 MLX5_MODI_OUT_SMAC_15_0};
1517                         info[idx] = (struct field_modify_info){4, off,
1518                                                 MLX5_MODI_OUT_SMAC_47_16};
1519                 }
1520                 break;
1521         case RTE_FLOW_FIELD_VLAN_TYPE:
1522                 /* not supported yet */
1523                 break;
1524         case RTE_FLOW_FIELD_VLAN_ID:
1525                 info[idx] = (struct field_modify_info){2, 0,
1526                                         MLX5_MODI_OUT_FIRST_VID};
1527                 if (mask)
1528                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1529                 break;
1530         case RTE_FLOW_FIELD_MAC_TYPE:
1531                 info[idx] = (struct field_modify_info){2, 0,
1532                                         MLX5_MODI_OUT_ETHERTYPE};
1533                 if (mask)
1534                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1535                 break;
1536         case RTE_FLOW_FIELD_IPV4_DSCP:
1537                 info[idx] = (struct field_modify_info){1, 0,
1538                                         MLX5_MODI_OUT_IP_DSCP};
1539                 if (mask)
1540                         mask[idx] = 0x3f >> (6 - width);
1541                 break;
1542         case RTE_FLOW_FIELD_IPV4_TTL:
1543                 info[idx] = (struct field_modify_info){1, 0,
1544                                         MLX5_MODI_OUT_IPV4_TTL};
1545                 if (mask)
1546                         mask[idx] = 0xff >> (8 - width);
1547                 break;
1548         case RTE_FLOW_FIELD_IPV4_SRC:
1549                 info[idx] = (struct field_modify_info){4, 0,
1550                                         MLX5_MODI_OUT_SIPV4};
1551                 if (mask)
1552                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1553                                                      (32 - width));
1554                 break;
1555         case RTE_FLOW_FIELD_IPV4_DST:
1556                 info[idx] = (struct field_modify_info){4, 0,
1557                                         MLX5_MODI_OUT_DIPV4};
1558                 if (mask)
1559                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1560                                                      (32 - width));
1561                 break;
1562         case RTE_FLOW_FIELD_IPV6_DSCP:
1563                 info[idx] = (struct field_modify_info){1, 0,
1564                                         MLX5_MODI_OUT_IP_DSCP};
1565                 if (mask)
1566                         mask[idx] = 0x3f >> (6 - width);
1567                 break;
1568         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1569                 info[idx] = (struct field_modify_info){1, 0,
1570                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1571                 if (mask)
1572                         mask[idx] = 0xff >> (8 - width);
1573                 break;
1574         case RTE_FLOW_FIELD_IPV6_SRC:
1575                 if (mask) {
1576                         if (data->offset < 32) {
1577                                 info[idx] = (struct field_modify_info){4,
1578                                                 4 * idx,
1579                                                 MLX5_MODI_OUT_SIPV6_31_0};
1580                                 if (width < 32) {
1581                                         mask[idx] =
1582                                                 rte_cpu_to_be_32(0xffffffff >>
1583                                                                  (32 - width));
1584                                         width = 0;
1585                                 } else {
1586                                         mask[idx] = RTE_BE32(0xffffffff);
1587                                         width -= 32;
1588                                 }
1589                                 if (!width)
1590                                         break;
1591                                 ++idx;
1592                         }
1593                         if (data->offset < 64) {
1594                                 info[idx] = (struct field_modify_info){4,
1595                                                 4 * idx,
1596                                                 MLX5_MODI_OUT_SIPV6_63_32};
1597                                 if (width < 32) {
1598                                         mask[idx] =
1599                                                 rte_cpu_to_be_32(0xffffffff >>
1600                                                                  (32 - width));
1601                                         width = 0;
1602                                 } else {
1603                                         mask[idx] = RTE_BE32(0xffffffff);
1604                                         width -= 32;
1605                                 }
1606                                 if (!width)
1607                                         break;
1608                                 ++idx;
1609                         }
1610                         if (data->offset < 96) {
1611                                 info[idx] = (struct field_modify_info){4,
1612                                                 4 * idx,
1613                                                 MLX5_MODI_OUT_SIPV6_95_64};
1614                                 if (width < 32) {
1615                                         mask[idx] =
1616                                                 rte_cpu_to_be_32(0xffffffff >>
1617                                                                  (32 - width));
1618                                         width = 0;
1619                                 } else {
1620                                         mask[idx] = RTE_BE32(0xffffffff);
1621                                         width -= 32;
1622                                 }
1623                                 if (!width)
1624                                         break;
1625                                 ++idx;
1626                         }
1627                         info[idx] = (struct field_modify_info){4, 4 * idx,
1628                                                 MLX5_MODI_OUT_SIPV6_127_96};
1629                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1630                                                      (32 - width));
1631                 } else {
1632                         if (data->offset < 32)
1633                                 info[idx++] = (struct field_modify_info){4, 0,
1634                                                 MLX5_MODI_OUT_SIPV6_31_0};
1635                         if (data->offset < 64)
1636                                 info[idx++] = (struct field_modify_info){4, 0,
1637                                                 MLX5_MODI_OUT_SIPV6_63_32};
1638                         if (data->offset < 96)
1639                                 info[idx++] = (struct field_modify_info){4, 0,
1640                                                 MLX5_MODI_OUT_SIPV6_95_64};
1641                         if (data->offset < 128)
1642                                 info[idx++] = (struct field_modify_info){4, 0,
1643                                                 MLX5_MODI_OUT_SIPV6_127_96};
1644                 }
1645                 break;
1646         case RTE_FLOW_FIELD_IPV6_DST:
1647                 if (mask) {
1648                         if (data->offset < 32) {
1649                                 info[idx] = (struct field_modify_info){4,
1650                                                 4 * idx,
1651                                                 MLX5_MODI_OUT_DIPV6_31_0};
1652                                 if (width < 32) {
1653                                         mask[idx] =
1654                                                 rte_cpu_to_be_32(0xffffffff >>
1655                                                                  (32 - width));
1656                                         width = 0;
1657                                 } else {
1658                                         mask[idx] = RTE_BE32(0xffffffff);
1659                                         width -= 32;
1660                                 }
1661                                 if (!width)
1662                                         break;
1663                                 ++idx;
1664                         }
1665                         if (data->offset < 64) {
1666                                 info[idx] = (struct field_modify_info){4,
1667                                                 4 * idx,
1668                                                 MLX5_MODI_OUT_DIPV6_63_32};
1669                                 if (width < 32) {
1670                                         mask[idx] =
1671                                                 rte_cpu_to_be_32(0xffffffff >>
1672                                                                  (32 - width));
1673                                         width = 0;
1674                                 } else {
1675                                         mask[idx] = RTE_BE32(0xffffffff);
1676                                         width -= 32;
1677                                 }
1678                                 if (!width)
1679                                         break;
1680                                 ++idx;
1681                         }
1682                         if (data->offset < 96) {
1683                                 info[idx] = (struct field_modify_info){4,
1684                                                 4 * idx,
1685                                                 MLX5_MODI_OUT_DIPV6_95_64};
1686                                 if (width < 32) {
1687                                         mask[idx] =
1688                                                 rte_cpu_to_be_32(0xffffffff >>
1689                                                                  (32 - width));
1690                                         width = 0;
1691                                 } else {
1692                                         mask[idx] = RTE_BE32(0xffffffff);
1693                                         width -= 32;
1694                                 }
1695                                 if (!width)
1696                                         break;
1697                                 ++idx;
1698                         }
1699                         info[idx] = (struct field_modify_info){4, 4 * idx,
1700                                                 MLX5_MODI_OUT_DIPV6_127_96};
1701                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1702                                                      (32 - width));
1703                 } else {
1704                         if (data->offset < 32)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_31_0};
1707                         if (data->offset < 64)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_63_32};
1710                         if (data->offset < 96)
1711                                 info[idx++] = (struct field_modify_info){4, 0,
1712                                                 MLX5_MODI_OUT_DIPV6_95_64};
1713                         if (data->offset < 128)
1714                                 info[idx++] = (struct field_modify_info){4, 0,
1715                                                 MLX5_MODI_OUT_DIPV6_127_96};
1716                 }
1717                 break;
1718         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1719                 info[idx] = (struct field_modify_info){2, 0,
1720                                         MLX5_MODI_OUT_TCP_SPORT};
1721                 if (mask)
1722                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1723                 break;
1724         case RTE_FLOW_FIELD_TCP_PORT_DST:
1725                 info[idx] = (struct field_modify_info){2, 0,
1726                                         MLX5_MODI_OUT_TCP_DPORT};
1727                 if (mask)
1728                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1729                 break;
1730         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1731                 info[idx] = (struct field_modify_info){4, 0,
1732                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1733                 if (mask)
1734                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1735                                                      (32 - width));
1736                 break;
1737         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1738                 info[idx] = (struct field_modify_info){4, 0,
1739                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1740                 if (mask)
1741                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1742                                                      (32 - width));
1743                 break;
1744         case RTE_FLOW_FIELD_TCP_FLAGS:
1745                 info[idx] = (struct field_modify_info){2, 0,
1746                                         MLX5_MODI_OUT_TCP_FLAGS};
1747                 if (mask)
1748                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1749                 break;
1750         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1751                 info[idx] = (struct field_modify_info){2, 0,
1752                                         MLX5_MODI_OUT_UDP_SPORT};
1753                 if (mask)
1754                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1755                 break;
1756         case RTE_FLOW_FIELD_UDP_PORT_DST:
1757                 info[idx] = (struct field_modify_info){2, 0,
1758                                         MLX5_MODI_OUT_UDP_DPORT};
1759                 if (mask)
1760                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1761                 break;
1762         case RTE_FLOW_FIELD_VXLAN_VNI:
1763                 /* not supported yet */
1764                 break;
1765         case RTE_FLOW_FIELD_GENEVE_VNI:
1766                 /* not supported yet*/
1767                 break;
1768         case RTE_FLOW_FIELD_GTP_TEID:
1769                 info[idx] = (struct field_modify_info){4, 0,
1770                                         MLX5_MODI_GTP_TEID};
1771                 if (mask)
1772                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1773                                                      (32 - width));
1774                 break;
1775         case RTE_FLOW_FIELD_TAG:
1776                 {
1777                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1778                                                    data->level, error);
1779                         if (reg < 0)
1780                                 return;
1781                         MLX5_ASSERT(reg != REG_NON);
1782                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1783                         info[idx] = (struct field_modify_info){4, 0,
1784                                                 reg_to_field[reg]};
1785                         if (mask)
1786                                 mask[idx] =
1787                                         rte_cpu_to_be_32(0xffffffff >>
1788                                                          (32 - width));
1789                 }
1790                 break;
1791         case RTE_FLOW_FIELD_MARK:
1792                 {
1793                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1794                                                        0, error);
1795                         if (reg < 0)
1796                                 return;
1797                         MLX5_ASSERT(reg != REG_NON);
1798                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1799                         info[idx] = (struct field_modify_info){4, 0,
1800                                                 reg_to_field[reg]};
1801                         if (mask)
1802                                 mask[idx] =
1803                                         rte_cpu_to_be_32(0xffffffff >>
1804                                                          (32 - width));
1805                 }
1806                 break;
1807         case RTE_FLOW_FIELD_META:
1808                 {
1809                         unsigned int xmeta = config->dv_xmeta_en;
1810                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1811                         if (reg < 0)
1812                                 return;
1813                         MLX5_ASSERT(reg != REG_NON);
1814                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1815                         if (xmeta == MLX5_XMETA_MODE_META16) {
1816                                 info[idx] = (struct field_modify_info){2, 0,
1817                                                         reg_to_field[reg]};
1818                                 if (mask)
1819                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1820                                                                 (16 - width));
1821                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1822                                 info[idx] = (struct field_modify_info){4, 0,
1823                                                         reg_to_field[reg]};
1824                                 if (mask)
1825                                         mask[idx] =
1826                                                 rte_cpu_to_be_32(0xffffffff >>
1827                                                                 (32 - width));
1828                         } else {
1829                                 MLX5_ASSERT(false);
1830                         }
1831                 }
1832                 break;
1833         case RTE_FLOW_FIELD_POINTER:
1834         case RTE_FLOW_FIELD_VALUE:
1835                 if (data->field == RTE_FLOW_FIELD_POINTER)
1836                         memcpy(&val, (void *)(uintptr_t)data->value,
1837                                sizeof(uint64_t));
1838                 else
1839                         val = data->value;
1840                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1841                         if (mask[idx]) {
1842                                 if (dst_width == 48) {
1843                                         /*special case for MAC addresses */
1844                                         value[idx] = rte_cpu_to_be_16(val);
1845                                         val >>= 16;
1846                                         dst_width -= 16;
1847                                 } else if (dst_width > 16) {
1848                                         value[idx] = rte_cpu_to_be_32(val);
1849                                         val >>= 32;
1850                                 } else if (dst_width > 8) {
1851                                         value[idx] = rte_cpu_to_be_16(val);
1852                                         val >>= 16;
1853                                 } else {
1854                                         value[idx] = (uint8_t)val;
1855                                         val >>= 8;
1856                                 }
1857                                 if (!val)
1858                                         break;
1859                         }
1860                 }
1861                 break;
1862         default:
1863                 MLX5_ASSERT(false);
1864                 break;
1865         }
1866 }
1867
1868 /**
1869  * Convert modify_field action to DV specification.
1870  *
1871  * @param[in] dev
1872  *   Pointer to the rte_eth_dev structure.
1873  * @param[in,out] resource
1874  *   Pointer to the modify-header resource.
1875  * @param[in] action
1876  *   Pointer to action specification.
1877  * @param[in] attr
1878  *   Attributes of flow that includes this action.
1879  * @param[out] error
1880  *   Pointer to the error structure.
1881  *
1882  * @return
1883  *   0 on success, a negative errno value otherwise and rte_errno is set.
1884  */
1885 static int
1886 flow_dv_convert_action_modify_field
1887                         (struct rte_eth_dev *dev,
1888                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1889                          const struct rte_flow_action *action,
1890                          const struct rte_flow_attr *attr,
1891                          struct rte_flow_error *error)
1892 {
1893         struct mlx5_priv *priv = dev->data->dev_private;
1894         struct mlx5_dev_config *config = &priv->config;
1895         const struct rte_flow_action_modify_field *conf =
1896                 (const struct rte_flow_action_modify_field *)(action->conf);
1897         struct rte_flow_item item;
1898         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1899                                                                 {0, 0, 0} };
1900         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1901                                                                 {0, 0, 0} };
1902         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1903         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1904         uint32_t type;
1905         uint32_t dst_width = mlx5_flow_item_field_width(config,
1906                                                         conf->dst.field);
1907
1908         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1909                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1910                 type = MLX5_MODIFICATION_TYPE_SET;
1911                 /** For SET fill the destination field (field) first. */
1912                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1913                         value, conf->width, dst_width, dev, attr, error);
1914                 /** Then copy immediate value from source as per mask. */
1915                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1916                         value, conf->width, dst_width, dev, attr, error);
1917                 item.spec = &value;
1918         } else {
1919                 type = MLX5_MODIFICATION_TYPE_COPY;
1920                 /** For COPY fill the destination field (dcopy) without mask. */
1921                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1922                         value, conf->width, dst_width, dev, attr, error);
1923                 /** Then construct the source field (field) with mask. */
1924                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1925                         value, conf->width, dst_width, dev, attr, error);
1926         }
1927         item.mask = &mask;
1928         return flow_dv_convert_modify_action(&item,
1929                         field, dcopy, resource, type, error);
1930 }
1931
1932 /**
1933  * Validate MARK item.
1934  *
1935  * @param[in] dev
1936  *   Pointer to the rte_eth_dev structure.
1937  * @param[in] item
1938  *   Item specification.
1939  * @param[in] attr
1940  *   Attributes of flow that includes this item.
1941  * @param[out] error
1942  *   Pointer to error structure.
1943  *
1944  * @return
1945  *   0 on success, a negative errno value otherwise and rte_errno is set.
1946  */
1947 static int
1948 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1949                            const struct rte_flow_item *item,
1950                            const struct rte_flow_attr *attr __rte_unused,
1951                            struct rte_flow_error *error)
1952 {
1953         struct mlx5_priv *priv = dev->data->dev_private;
1954         struct mlx5_dev_config *config = &priv->config;
1955         const struct rte_flow_item_mark *spec = item->spec;
1956         const struct rte_flow_item_mark *mask = item->mask;
1957         const struct rte_flow_item_mark nic_mask = {
1958                 .id = priv->sh->dv_mark_mask,
1959         };
1960         int ret;
1961
1962         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1963                 return rte_flow_error_set(error, ENOTSUP,
1964                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1965                                           "extended metadata feature"
1966                                           " isn't enabled");
1967         if (!mlx5_flow_ext_mreg_supported(dev))
1968                 return rte_flow_error_set(error, ENOTSUP,
1969                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1970                                           "extended metadata register"
1971                                           " isn't supported");
1972         if (!nic_mask.id)
1973                 return rte_flow_error_set(error, ENOTSUP,
1974                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1975                                           "extended metadata register"
1976                                           " isn't available");
1977         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1978         if (ret < 0)
1979                 return ret;
1980         if (!spec)
1981                 return rte_flow_error_set(error, EINVAL,
1982                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1983                                           item->spec,
1984                                           "data cannot be empty");
1985         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1986                 return rte_flow_error_set(error, EINVAL,
1987                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1988                                           &spec->id,
1989                                           "mark id exceeds the limit");
1990         if (!mask)
1991                 mask = &nic_mask;
1992         if (!mask->id)
1993                 return rte_flow_error_set(error, EINVAL,
1994                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1995                                         "mask cannot be zero");
1996
1997         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1998                                         (const uint8_t *)&nic_mask,
1999                                         sizeof(struct rte_flow_item_mark),
2000                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2001         if (ret < 0)
2002                 return ret;
2003         return 0;
2004 }
2005
2006 /**
2007  * Validate META item.
2008  *
2009  * @param[in] dev
2010  *   Pointer to the rte_eth_dev structure.
2011  * @param[in] item
2012  *   Item specification.
2013  * @param[in] attr
2014  *   Attributes of flow that includes this item.
2015  * @param[out] error
2016  *   Pointer to error structure.
2017  *
2018  * @return
2019  *   0 on success, a negative errno value otherwise and rte_errno is set.
2020  */
2021 static int
2022 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
2023                            const struct rte_flow_item *item,
2024                            const struct rte_flow_attr *attr,
2025                            struct rte_flow_error *error)
2026 {
2027         struct mlx5_priv *priv = dev->data->dev_private;
2028         struct mlx5_dev_config *config = &priv->config;
2029         const struct rte_flow_item_meta *spec = item->spec;
2030         const struct rte_flow_item_meta *mask = item->mask;
2031         struct rte_flow_item_meta nic_mask = {
2032                 .data = UINT32_MAX
2033         };
2034         int reg;
2035         int ret;
2036
2037         if (!spec)
2038                 return rte_flow_error_set(error, EINVAL,
2039                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2040                                           item->spec,
2041                                           "data cannot be empty");
2042         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2043                 if (!mlx5_flow_ext_mreg_supported(dev))
2044                         return rte_flow_error_set(error, ENOTSUP,
2045                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2046                                           "extended metadata register"
2047                                           " isn't supported");
2048                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2049                 if (reg < 0)
2050                         return reg;
2051                 if (reg == REG_NON)
2052                         return rte_flow_error_set(error, ENOTSUP,
2053                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2054                                         "unavalable extended metadata register");
2055                 if (reg == REG_B)
2056                         return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2058                                           "match on reg_b "
2059                                           "isn't supported");
2060                 if (reg != REG_A)
2061                         nic_mask.data = priv->sh->dv_meta_mask;
2062         } else {
2063                 if (attr->transfer)
2064                         return rte_flow_error_set(error, ENOTSUP,
2065                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2066                                         "extended metadata feature "
2067                                         "should be enabled when "
2068                                         "meta item is requested "
2069                                         "with e-switch mode ");
2070                 if (attr->ingress)
2071                         return rte_flow_error_set(error, ENOTSUP,
2072                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2073                                         "match on metadata for ingress "
2074                                         "is not supported in legacy "
2075                                         "metadata mode");
2076         }
2077         if (!mask)
2078                 mask = &rte_flow_item_meta_mask;
2079         if (!mask->data)
2080                 return rte_flow_error_set(error, EINVAL,
2081                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2082                                         "mask cannot be zero");
2083
2084         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2085                                         (const uint8_t *)&nic_mask,
2086                                         sizeof(struct rte_flow_item_meta),
2087                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2088         return ret;
2089 }
2090
2091 /**
2092  * Validate TAG item.
2093  *
2094  * @param[in] dev
2095  *   Pointer to the rte_eth_dev structure.
2096  * @param[in] item
2097  *   Item specification.
2098  * @param[in] attr
2099  *   Attributes of flow that includes this item.
2100  * @param[out] error
2101  *   Pointer to error structure.
2102  *
2103  * @return
2104  *   0 on success, a negative errno value otherwise and rte_errno is set.
2105  */
2106 static int
2107 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2108                           const struct rte_flow_item *item,
2109                           const struct rte_flow_attr *attr __rte_unused,
2110                           struct rte_flow_error *error)
2111 {
2112         const struct rte_flow_item_tag *spec = item->spec;
2113         const struct rte_flow_item_tag *mask = item->mask;
2114         const struct rte_flow_item_tag nic_mask = {
2115                 .data = RTE_BE32(UINT32_MAX),
2116                 .index = 0xff,
2117         };
2118         int ret;
2119
2120         if (!mlx5_flow_ext_mreg_supported(dev))
2121                 return rte_flow_error_set(error, ENOTSUP,
2122                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2123                                           "extensive metadata register"
2124                                           " isn't supported");
2125         if (!spec)
2126                 return rte_flow_error_set(error, EINVAL,
2127                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2128                                           item->spec,
2129                                           "data cannot be empty");
2130         if (!mask)
2131                 mask = &rte_flow_item_tag_mask;
2132         if (!mask->data)
2133                 return rte_flow_error_set(error, EINVAL,
2134                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2135                                         "mask cannot be zero");
2136
2137         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2138                                         (const uint8_t *)&nic_mask,
2139                                         sizeof(struct rte_flow_item_tag),
2140                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2141         if (ret < 0)
2142                 return ret;
2143         if (mask->index != 0xff)
2144                 return rte_flow_error_set(error, EINVAL,
2145                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2146                                           "partial mask for tag index"
2147                                           " is not supported");
2148         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2149         if (ret < 0)
2150                 return ret;
2151         MLX5_ASSERT(ret != REG_NON);
2152         return 0;
2153 }
2154
2155 /**
2156  * Validate vport item.
2157  *
2158  * @param[in] dev
2159  *   Pointer to the rte_eth_dev structure.
2160  * @param[in] item
2161  *   Item specification.
2162  * @param[in] attr
2163  *   Attributes of flow that includes this item.
2164  * @param[in] item_flags
2165  *   Bit-fields that holds the items detected until now.
2166  * @param[out] error
2167  *   Pointer to error structure.
2168  *
2169  * @return
2170  *   0 on success, a negative errno value otherwise and rte_errno is set.
2171  */
2172 static int
2173 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2174                               const struct rte_flow_item *item,
2175                               const struct rte_flow_attr *attr,
2176                               uint64_t item_flags,
2177                               struct rte_flow_error *error)
2178 {
2179         const struct rte_flow_item_port_id *spec = item->spec;
2180         const struct rte_flow_item_port_id *mask = item->mask;
2181         const struct rte_flow_item_port_id switch_mask = {
2182                         .id = 0xffffffff,
2183         };
2184         struct mlx5_priv *esw_priv;
2185         struct mlx5_priv *dev_priv;
2186         int ret;
2187
2188         if (!attr->transfer)
2189                 return rte_flow_error_set(error, EINVAL,
2190                                           RTE_FLOW_ERROR_TYPE_ITEM,
2191                                           NULL,
2192                                           "match on port id is valid only"
2193                                           " when transfer flag is enabled");
2194         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2195                 return rte_flow_error_set(error, ENOTSUP,
2196                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2197                                           "multiple source ports are not"
2198                                           " supported");
2199         if (!mask)
2200                 mask = &switch_mask;
2201         if (mask->id != 0xffffffff)
2202                 return rte_flow_error_set(error, ENOTSUP,
2203                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2204                                            mask,
2205                                            "no support for partial mask on"
2206                                            " \"id\" field");
2207         ret = mlx5_flow_item_acceptable
2208                                 (item, (const uint8_t *)mask,
2209                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2210                                  sizeof(struct rte_flow_item_port_id),
2211                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2212         if (ret)
2213                 return ret;
2214         if (!spec)
2215                 return 0;
2216         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2217         if (!esw_priv)
2218                 return rte_flow_error_set(error, rte_errno,
2219                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2220                                           "failed to obtain E-Switch info for"
2221                                           " port");
2222         dev_priv = mlx5_dev_to_eswitch_info(dev);
2223         if (!dev_priv)
2224                 return rte_flow_error_set(error, rte_errno,
2225                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2226                                           NULL,
2227                                           "failed to obtain E-Switch info");
2228         if (esw_priv->domain_id != dev_priv->domain_id)
2229                 return rte_flow_error_set(error, EINVAL,
2230                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2231                                           "cannot match on a port from a"
2232                                           " different E-Switch");
2233         return 0;
2234 }
2235
2236 /**
2237  * Validate VLAN item.
2238  *
2239  * @param[in] item
2240  *   Item specification.
2241  * @param[in] item_flags
2242  *   Bit-fields that holds the items detected until now.
2243  * @param[in] dev
2244  *   Ethernet device flow is being created on.
2245  * @param[out] error
2246  *   Pointer to error structure.
2247  *
2248  * @return
2249  *   0 on success, a negative errno value otherwise and rte_errno is set.
2250  */
2251 static int
2252 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2253                            uint64_t item_flags,
2254                            struct rte_eth_dev *dev,
2255                            struct rte_flow_error *error)
2256 {
2257         const struct rte_flow_item_vlan *mask = item->mask;
2258         const struct rte_flow_item_vlan nic_mask = {
2259                 .tci = RTE_BE16(UINT16_MAX),
2260                 .inner_type = RTE_BE16(UINT16_MAX),
2261                 .has_more_vlan = 1,
2262         };
2263         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2264         int ret;
2265         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2266                                         MLX5_FLOW_LAYER_INNER_L4) :
2267                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2268                                         MLX5_FLOW_LAYER_OUTER_L4);
2269         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2270                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2271
2272         if (item_flags & vlanm)
2273                 return rte_flow_error_set(error, EINVAL,
2274                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2275                                           "multiple VLAN layers not supported");
2276         else if ((item_flags & l34m) != 0)
2277                 return rte_flow_error_set(error, EINVAL,
2278                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2279                                           "VLAN cannot follow L3/L4 layer");
2280         if (!mask)
2281                 mask = &rte_flow_item_vlan_mask;
2282         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2283                                         (const uint8_t *)&nic_mask,
2284                                         sizeof(struct rte_flow_item_vlan),
2285                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2286         if (ret)
2287                 return ret;
2288         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2289                 struct mlx5_priv *priv = dev->data->dev_private;
2290
2291                 if (priv->vmwa_context) {
2292                         /*
2293                          * Non-NULL context means we have a virtual machine
2294                          * and SR-IOV enabled, we have to create VLAN interface
2295                          * to make hypervisor to setup E-Switch vport
2296                          * context correctly. We avoid creating the multiple
2297                          * VLAN interfaces, so we cannot support VLAN tag mask.
2298                          */
2299                         return rte_flow_error_set(error, EINVAL,
2300                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2301                                                   item,
2302                                                   "VLAN tag mask is not"
2303                                                   " supported in virtual"
2304                                                   " environment");
2305                 }
2306         }
2307         return 0;
2308 }
2309
2310 /*
2311  * GTP flags are contained in 1 byte of the format:
2312  * -------------------------------------------
2313  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2314  * |-----------------------------------------|
2315  * | value | Version | PT | Res | E | S | PN |
2316  * -------------------------------------------
2317  *
2318  * Matching is supported only for GTP flags E, S, PN.
2319  */
2320 #define MLX5_GTP_FLAGS_MASK     0x07
2321
2322 /**
2323  * Validate GTP item.
2324  *
2325  * @param[in] dev
2326  *   Pointer to the rte_eth_dev structure.
2327  * @param[in] item
2328  *   Item specification.
2329  * @param[in] item_flags
2330  *   Bit-fields that holds the items detected until now.
2331  * @param[out] error
2332  *   Pointer to error structure.
2333  *
2334  * @return
2335  *   0 on success, a negative errno value otherwise and rte_errno is set.
2336  */
2337 static int
2338 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2339                           const struct rte_flow_item *item,
2340                           uint64_t item_flags,
2341                           struct rte_flow_error *error)
2342 {
2343         struct mlx5_priv *priv = dev->data->dev_private;
2344         const struct rte_flow_item_gtp *spec = item->spec;
2345         const struct rte_flow_item_gtp *mask = item->mask;
2346         const struct rte_flow_item_gtp nic_mask = {
2347                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2348                 .msg_type = 0xff,
2349                 .teid = RTE_BE32(0xffffffff),
2350         };
2351
2352         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2353                 return rte_flow_error_set(error, ENOTSUP,
2354                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2355                                           "GTP support is not enabled");
2356         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2357                 return rte_flow_error_set(error, ENOTSUP,
2358                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2359                                           "multiple tunnel layers not"
2360                                           " supported");
2361         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2362                 return rte_flow_error_set(error, EINVAL,
2363                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                                           "no outer UDP layer found");
2365         if (!mask)
2366                 mask = &rte_flow_item_gtp_mask;
2367         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2368                 return rte_flow_error_set(error, ENOTSUP,
2369                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2370                                           "Match is supported for GTP"
2371                                           " flags only");
2372         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2373                                          (const uint8_t *)&nic_mask,
2374                                          sizeof(struct rte_flow_item_gtp),
2375                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2376 }
2377
2378 /**
2379  * Validate GTP PSC item.
2380  *
2381  * @param[in] item
2382  *   Item specification.
2383  * @param[in] last_item
2384  *   Previous validated item in the pattern items.
2385  * @param[in] gtp_item
2386  *   Previous GTP item specification.
2387  * @param[in] attr
2388  *   Pointer to flow attributes.
2389  * @param[out] error
2390  *   Pointer to error structure.
2391  *
2392  * @return
2393  *   0 on success, a negative errno value otherwise and rte_errno is set.
2394  */
2395 static int
2396 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2397                               uint64_t last_item,
2398                               const struct rte_flow_item *gtp_item,
2399                               const struct rte_flow_attr *attr,
2400                               struct rte_flow_error *error)
2401 {
2402         const struct rte_flow_item_gtp *gtp_spec;
2403         const struct rte_flow_item_gtp *gtp_mask;
2404         const struct rte_flow_item_gtp_psc *spec;
2405         const struct rte_flow_item_gtp_psc *mask;
2406         const struct rte_flow_item_gtp_psc nic_mask = {
2407                 .pdu_type = 0xFF,
2408                 .qfi = 0xFF,
2409         };
2410
2411         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2412                 return rte_flow_error_set
2413                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2414                          "GTP PSC item must be preceded with GTP item");
2415         gtp_spec = gtp_item->spec;
2416         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2417         /* GTP spec and E flag is requested to match zero. */
2418         if (gtp_spec &&
2419                 (gtp_mask->v_pt_rsv_flags &
2420                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2421                 return rte_flow_error_set
2422                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2423                          "GTP E flag must be 1 to match GTP PSC");
2424         /* Check the flow is not created in group zero. */
2425         if (!attr->transfer && !attr->group)
2426                 return rte_flow_error_set
2427                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2428                          "GTP PSC is not supported for group 0");
2429         /* GTP spec is here and E flag is requested to match zero. */
2430         if (!item->spec)
2431                 return 0;
2432         spec = item->spec;
2433         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2434         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2435                 return rte_flow_error_set
2436                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2437                          "PDU type should be smaller than 16");
2438         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2439                                          (const uint8_t *)&nic_mask,
2440                                          sizeof(struct rte_flow_item_gtp_psc),
2441                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2442 }
2443
2444 /**
2445  * Validate IPV4 item.
2446  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2447  * add specific validation of fragment_offset field,
2448  *
2449  * @param[in] item
2450  *   Item specification.
2451  * @param[in] item_flags
2452  *   Bit-fields that holds the items detected until now.
2453  * @param[out] error
2454  *   Pointer to error structure.
2455  *
2456  * @return
2457  *   0 on success, a negative errno value otherwise and rte_errno is set.
2458  */
2459 static int
2460 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2461                            uint64_t item_flags,
2462                            uint64_t last_item,
2463                            uint16_t ether_type,
2464                            struct rte_flow_error *error)
2465 {
2466         int ret;
2467         const struct rte_flow_item_ipv4 *spec = item->spec;
2468         const struct rte_flow_item_ipv4 *last = item->last;
2469         const struct rte_flow_item_ipv4 *mask = item->mask;
2470         rte_be16_t fragment_offset_spec = 0;
2471         rte_be16_t fragment_offset_last = 0;
2472         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2473                 .hdr = {
2474                         .src_addr = RTE_BE32(0xffffffff),
2475                         .dst_addr = RTE_BE32(0xffffffff),
2476                         .type_of_service = 0xff,
2477                         .fragment_offset = RTE_BE16(0xffff),
2478                         .next_proto_id = 0xff,
2479                         .time_to_live = 0xff,
2480                 },
2481         };
2482
2483         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2484                                            ether_type, &nic_ipv4_mask,
2485                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2486         if (ret < 0)
2487                 return ret;
2488         if (spec && mask)
2489                 fragment_offset_spec = spec->hdr.fragment_offset &
2490                                        mask->hdr.fragment_offset;
2491         if (!fragment_offset_spec)
2492                 return 0;
2493         /*
2494          * spec and mask are valid, enforce using full mask to make sure the
2495          * complete value is used correctly.
2496          */
2497         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2498                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2499                 return rte_flow_error_set(error, EINVAL,
2500                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2501                                           item, "must use full mask for"
2502                                           " fragment_offset");
2503         /*
2504          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2505          * indicating this is 1st fragment of fragmented packet.
2506          * This is not yet supported in MLX5, return appropriate error message.
2507          */
2508         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2509                 return rte_flow_error_set(error, ENOTSUP,
2510                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2511                                           "match on first fragment not "
2512                                           "supported");
2513         if (fragment_offset_spec && !last)
2514                 return rte_flow_error_set(error, ENOTSUP,
2515                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2516                                           "specified value not supported");
2517         /* spec and last are valid, validate the specified range. */
2518         fragment_offset_last = last->hdr.fragment_offset &
2519                                mask->hdr.fragment_offset;
2520         /*
2521          * Match on fragment_offset spec 0x2001 and last 0x3fff
2522          * means MF is 1 and frag-offset is > 0.
2523          * This packet is fragment 2nd and onward, excluding last.
2524          * This is not yet supported in MLX5, return appropriate
2525          * error message.
2526          */
2527         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2528             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2529                 return rte_flow_error_set(error, ENOTSUP,
2530                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2531                                           last, "match on following "
2532                                           "fragments not supported");
2533         /*
2534          * Match on fragment_offset spec 0x0001 and last 0x1fff
2535          * means MF is 0 and frag-offset is > 0.
2536          * This packet is last fragment of fragmented packet.
2537          * This is not yet supported in MLX5, return appropriate
2538          * error message.
2539          */
2540         if (fragment_offset_spec == RTE_BE16(1) &&
2541             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2542                 return rte_flow_error_set(error, ENOTSUP,
2543                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2544                                           last, "match on last "
2545                                           "fragment not supported");
2546         /*
2547          * Match on fragment_offset spec 0x0001 and last 0x3fff
2548          * means MF and/or frag-offset is not 0.
2549          * This is a fragmented packet.
2550          * Other range values are invalid and rejected.
2551          */
2552         if (!(fragment_offset_spec == RTE_BE16(1) &&
2553               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2554                 return rte_flow_error_set(error, ENOTSUP,
2555                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2556                                           "specified range not supported");
2557         return 0;
2558 }
2559
2560 /**
2561  * Validate IPV6 fragment extension item.
2562  *
2563  * @param[in] item
2564  *   Item specification.
2565  * @param[in] item_flags
2566  *   Bit-fields that holds the items detected until now.
2567  * @param[out] error
2568  *   Pointer to error structure.
2569  *
2570  * @return
2571  *   0 on success, a negative errno value otherwise and rte_errno is set.
2572  */
2573 static int
2574 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2575                                     uint64_t item_flags,
2576                                     struct rte_flow_error *error)
2577 {
2578         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2579         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2580         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2581         rte_be16_t frag_data_spec = 0;
2582         rte_be16_t frag_data_last = 0;
2583         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2584         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2585                                       MLX5_FLOW_LAYER_OUTER_L4;
2586         int ret = 0;
2587         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2588                 .hdr = {
2589                         .next_header = 0xff,
2590                         .frag_data = RTE_BE16(0xffff),
2591                 },
2592         };
2593
2594         if (item_flags & l4m)
2595                 return rte_flow_error_set(error, EINVAL,
2596                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2597                                           "ipv6 fragment extension item cannot "
2598                                           "follow L4 item.");
2599         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2600             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2601                 return rte_flow_error_set(error, EINVAL,
2602                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2603                                           "ipv6 fragment extension item must "
2604                                           "follow ipv6 item");
2605         if (spec && mask)
2606                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2607         if (!frag_data_spec)
2608                 return 0;
2609         /*
2610          * spec and mask are valid, enforce using full mask to make sure the
2611          * complete value is used correctly.
2612          */
2613         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2614                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2615                 return rte_flow_error_set(error, EINVAL,
2616                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2617                                           item, "must use full mask for"
2618                                           " frag_data");
2619         /*
2620          * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
2621          * This is 1st fragment of fragmented packet.
2622          */
2623         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2624                 return rte_flow_error_set(error, ENOTSUP,
2625                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2626                                           "match on first fragment not "
2627                                           "supported");
2628         if (frag_data_spec && !last)
2629                 return rte_flow_error_set(error, EINVAL,
2630                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2631                                           "specified value not supported");
2632         ret = mlx5_flow_item_acceptable
2633                                 (item, (const uint8_t *)mask,
2634                                  (const uint8_t *)&nic_mask,
2635                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2636                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2637         if (ret)
2638                 return ret;
2639         /* spec and last are valid, validate the specified range. */
2640         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2641         /*
2642          * Match on frag_data spec 0x0009 and last 0xfff9
2643          * means M is 1 and frag-offset is > 0.
2644          * This packet is fragment 2nd and onward, excluding last.
2645          * This is not yet supported in MLX5, return appropriate
2646          * error message.
2647          */
2648         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2649                                        RTE_IPV6_EHDR_MF_MASK) &&
2650             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2651                 return rte_flow_error_set(error, ENOTSUP,
2652                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2653                                           last, "match on following "
2654                                           "fragments not supported");
2655         /*
2656          * Match on frag_data spec 0x0008 and last 0xfff8
2657          * means M is 0 and frag-offset is > 0.
2658          * This packet is last fragment of fragmented packet.
2659          * This is not yet supported in MLX5, return appropriate
2660          * error message.
2661          */
2662         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2663             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2664                 return rte_flow_error_set(error, ENOTSUP,
2665                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2666                                           last, "match on last "
2667                                           "fragment not supported");
2668         /* Other range values are invalid and rejected. */
2669         return rte_flow_error_set(error, EINVAL,
2670                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2671                                   "specified range not supported");
2672 }
2673
2674 /*
2675  * Validate ASO CT item.
2676  *
2677  * @param[in] dev
2678  *   Pointer to the rte_eth_dev structure.
2679  * @param[in] item
2680  *   Item specification.
2681  * @param[in] item_flags
2682  *   Pointer to bit-fields that holds the items detected until now.
2683  * @param[out] error
2684  *   Pointer to error structure.
2685  *
2686  * @return
2687  *   0 on success, a negative errno value otherwise and rte_errno is set.
2688  */
2689 static int
2690 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2691                              const struct rte_flow_item *item,
2692                              uint64_t *item_flags,
2693                              struct rte_flow_error *error)
2694 {
2695         const struct rte_flow_item_conntrack *spec = item->spec;
2696         const struct rte_flow_item_conntrack *mask = item->mask;
2697         RTE_SET_USED(dev);
2698         uint32_t flags;
2699
2700         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2701                 return rte_flow_error_set(error, EINVAL,
2702                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2703                                           "Only one CT is supported");
2704         if (!mask)
2705                 mask = &rte_flow_item_conntrack_mask;
2706         flags = spec->flags & mask->flags;
2707         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2708             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2709              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2710              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2711                 return rte_flow_error_set(error, EINVAL,
2712                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2713                                           "Conflict status bits");
2714         /* State change also needs to be considered. */
2715         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2716         return 0;
2717 }
2718
2719 /**
2720  * Validate the pop VLAN action.
2721  *
2722  * @param[in] dev
2723  *   Pointer to the rte_eth_dev structure.
2724  * @param[in] action_flags
2725  *   Holds the actions detected until now.
2726  * @param[in] action
2727  *   Pointer to the pop vlan action.
2728  * @param[in] item_flags
2729  *   The items found in this flow rule.
2730  * @param[in] attr
2731  *   Pointer to flow attributes.
2732  * @param[out] error
2733  *   Pointer to error structure.
2734  *
2735  * @return
2736  *   0 on success, a negative errno value otherwise and rte_errno is set.
2737  */
2738 static int
2739 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2740                                  uint64_t action_flags,
2741                                  const struct rte_flow_action *action,
2742                                  uint64_t item_flags,
2743                                  const struct rte_flow_attr *attr,
2744                                  struct rte_flow_error *error)
2745 {
2746         const struct mlx5_priv *priv = dev->data->dev_private;
2747
2748         (void)action;
2749         (void)attr;
2750         if (!priv->sh->pop_vlan_action)
2751                 return rte_flow_error_set(error, ENOTSUP,
2752                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2753                                           NULL,
2754                                           "pop vlan action is not supported");
2755         if (attr->egress)
2756                 return rte_flow_error_set(error, ENOTSUP,
2757                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2758                                           NULL,
2759                                           "pop vlan action not supported for "
2760                                           "egress");
2761         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2762                 return rte_flow_error_set(error, ENOTSUP,
2763                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2764                                           "no support for multiple VLAN "
2765                                           "actions");
2766         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2767         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2768             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2769                 return rte_flow_error_set(error, ENOTSUP,
2770                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2771                                           NULL,
2772                                           "cannot pop vlan after decap without "
2773                                           "match on inner vlan in the flow");
2774         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2775         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2776             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2777                 return rte_flow_error_set(error, ENOTSUP,
2778                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2779                                           NULL,
2780                                           "cannot pop vlan without a "
2781                                           "match on (outer) vlan in the flow");
2782         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2783                 return rte_flow_error_set(error, EINVAL,
2784                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2785                                           "wrong action order, port_id should "
2786                                           "be after pop VLAN action");
2787         if (!attr->transfer && priv->representor)
2788                 return rte_flow_error_set(error, ENOTSUP,
2789                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2790                                           "pop vlan action for VF representor "
2791                                           "not supported on NIC table");
2792         return 0;
2793 }
2794
2795 /**
2796  * Get VLAN default info from vlan match info.
2797  *
2798  * @param[in] items
2799  *   the list of item specifications.
2800  * @param[out] vlan
2801  *   pointer VLAN info to fill to.
2802  *
 * @return
 *   Nothing; the default VLAN info in @p vlan is updated in place.
2805  */
2806 static void
2807 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2808                                   struct rte_vlan_hdr *vlan)
2809 {
2810         const struct rte_flow_item_vlan nic_mask = {
2811                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2812                                 MLX5DV_FLOW_VLAN_VID_MASK),
2813                 .inner_type = RTE_BE16(0xffff),
2814         };
2815
2816         if (items == NULL)
2817                 return;
2818         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2819                 int type = items->type;
2820
2821                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2822                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2823                         break;
2824         }
2825         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2826                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2827                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2828
2829                 /* If VLAN item in pattern doesn't contain data, return here. */
2830                 if (!vlan_v)
2831                         return;
2832                 if (!vlan_m)
2833                         vlan_m = &nic_mask;
2834                 /* Only full match values are accepted */
2835                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2836                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2837                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2838                         vlan->vlan_tci |=
2839                                 rte_be_to_cpu_16(vlan_v->tci &
2840                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2841                 }
2842                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2843                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2844                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2845                         vlan->vlan_tci |=
2846                                 rte_be_to_cpu_16(vlan_v->tci &
2847                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2848                 }
2849                 if (vlan_m->inner_type == nic_mask.inner_type)
2850                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2851                                                            vlan_m->inner_type);
2852         }
2853 }
2854
2855 /**
2856  * Validate the push VLAN action.
2857  *
2858  * @param[in] dev
2859  *   Pointer to the rte_eth_dev structure.
2860  * @param[in] action_flags
2861  *   Holds the actions detected until now.
 * @param[in] vlan_m
 *   VLAN item mask taken from the flow pattern, or NULL if absent.
2864  * @param[in] action
2865  *   Pointer to the action structure.
2866  * @param[in] attr
2867  *   Pointer to flow attributes
2868  * @param[out] error
2869  *   Pointer to error structure.
2870  *
2871  * @return
2872  *   0 on success, a negative errno value otherwise and rte_errno is set.
2873  */
2874 static int
2875 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2876                                   uint64_t action_flags,
2877                                   const struct rte_flow_item_vlan *vlan_m,
2878                                   const struct rte_flow_action *action,
2879                                   const struct rte_flow_attr *attr,
2880                                   struct rte_flow_error *error)
2881 {
2882         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2883         const struct mlx5_priv *priv = dev->data->dev_private;
2884
2885         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2886             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2887                 return rte_flow_error_set(error, EINVAL,
2888                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2889                                           "invalid vlan ethertype");
2890         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2891                 return rte_flow_error_set(error, EINVAL,
2892                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2893                                           "wrong action order, port_id should "
2894                                           "be after push VLAN");
2895         if (!attr->transfer && priv->representor)
2896                 return rte_flow_error_set(error, ENOTSUP,
2897                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2898                                           "push vlan action for VF representor "
2899                                           "not supported on NIC table");
2900         if (vlan_m &&
2901             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2902             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2903                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2904             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2905             !(mlx5_flow_find_action
2906                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2907                 return rte_flow_error_set(error, EINVAL,
2908                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2909                                           "not full match mask on VLAN PCP and "
2910                                           "there is no of_set_vlan_pcp action, "
2911                                           "push VLAN action cannot figure out "
2912                                           "PCP value");
2913         if (vlan_m &&
2914             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2915             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2916                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2917             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2918             !(mlx5_flow_find_action
2919                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2920                 return rte_flow_error_set(error, EINVAL,
2921                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2922                                           "not full match mask on VLAN VID and "
2923                                           "there is no of_set_vlan_vid action, "
2924                                           "push VLAN action cannot figure out "
2925                                           "VID value");
2926         (void)attr;
2927         return 0;
2928 }
2929
2930 /**
2931  * Validate the set VLAN PCP.
2932  *
2933  * @param[in] action_flags
2934  *   Holds the actions detected until now.
2935  * @param[in] actions
2936  *   Pointer to the list of actions remaining in the flow rule.
2937  * @param[out] error
2938  *   Pointer to error structure.
2939  *
2940  * @return
2941  *   0 on success, a negative errno value otherwise and rte_errno is set.
2942  */
2943 static int
2944 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2945                                      const struct rte_flow_action actions[],
2946                                      struct rte_flow_error *error)
2947 {
2948         const struct rte_flow_action *action = actions;
2949         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2950
2951         if (conf->vlan_pcp > 7)
2952                 return rte_flow_error_set(error, EINVAL,
2953                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2954                                           "VLAN PCP value is too big");
2955         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2956                 return rte_flow_error_set(error, ENOTSUP,
2957                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2958                                           "set VLAN PCP action must follow "
2959                                           "the push VLAN action");
2960         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2961                 return rte_flow_error_set(error, ENOTSUP,
2962                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2963                                           "Multiple VLAN PCP modification are "
2964                                           "not supported");
2965         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2966                 return rte_flow_error_set(error, EINVAL,
2967                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2968                                           "wrong action order, port_id should "
2969                                           "be after set VLAN PCP");
2970         return 0;
2971 }
2972
2973 /**
2974  * Validate the set VLAN VID.
2975  *
2976  * @param[in] item_flags
2977  *   Holds the items detected in this rule.
2978  * @param[in] action_flags
2979  *   Holds the actions detected until now.
2980  * @param[in] actions
2981  *   Pointer to the list of actions remaining in the flow rule.
2982  * @param[out] error
2983  *   Pointer to error structure.
2984  *
2985  * @return
2986  *   0 on success, a negative errno value otherwise and rte_errno is set.
2987  */
2988 static int
2989 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2990                                      uint64_t action_flags,
2991                                      const struct rte_flow_action actions[],
2992                                      struct rte_flow_error *error)
2993 {
2994         const struct rte_flow_action *action = actions;
2995         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2996
2997         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2998                 return rte_flow_error_set(error, EINVAL,
2999                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3000                                           "VLAN VID value is too big");
3001         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3002             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3003                 return rte_flow_error_set(error, ENOTSUP,
3004                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3005                                           "set VLAN VID action must follow push"
3006                                           " VLAN action or match on VLAN item");
3007         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3008                 return rte_flow_error_set(error, ENOTSUP,
3009                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3010                                           "Multiple VLAN VID modifications are "
3011                                           "not supported");
3012         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3013                 return rte_flow_error_set(error, EINVAL,
3014                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3015                                           "wrong action order, port_id should "
3016                                           "be after set VLAN VID");
3017         return 0;
3018 }
3019
/**
3021  * Validate the FLAG action.
3022  *
3023  * @param[in] dev
3024  *   Pointer to the rte_eth_dev structure.
3025  * @param[in] action_flags
3026  *   Holds the actions detected until now.
3027  * @param[in] attr
3028  *   Pointer to flow attributes
3029  * @param[out] error
3030  *   Pointer to error structure.
3031  *
3032  * @return
3033  *   0 on success, a negative errno value otherwise and rte_errno is set.
3034  */
3035 static int
3036 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3037                              uint64_t action_flags,
3038                              const struct rte_flow_attr *attr,
3039                              struct rte_flow_error *error)
3040 {
3041         struct mlx5_priv *priv = dev->data->dev_private;
3042         struct mlx5_dev_config *config = &priv->config;
3043         int ret;
3044
3045         /* Fall back if no extended metadata register support. */
3046         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3047                 return mlx5_flow_validate_action_flag(action_flags, attr,
3048                                                       error);
3049         /* Extensive metadata mode requires registers. */
3050         if (!mlx5_flow_ext_mreg_supported(dev))
3051                 return rte_flow_error_set(error, ENOTSUP,
3052                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3053                                           "no metadata registers "
3054                                           "to support flag action");
3055         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3056                 return rte_flow_error_set(error, ENOTSUP,
3057                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3058                                           "extended metadata register"
3059                                           " isn't available");
3060         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3061         if (ret < 0)
3062                 return ret;
3063         MLX5_ASSERT(ret > 0);
3064         if (action_flags & MLX5_FLOW_ACTION_MARK)
3065                 return rte_flow_error_set(error, EINVAL,
3066                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3067                                           "can't mark and flag in same flow");
3068         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3069                 return rte_flow_error_set(error, EINVAL,
3070                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3071                                           "can't have 2 flag"
3072                                           " actions in same flow");
3073         return 0;
3074 }
3075
3076 /**
3077  * Validate MARK action.
3078  *
3079  * @param[in] dev
3080  *   Pointer to the rte_eth_dev structure.
3081  * @param[in] action
3082  *   Pointer to action.
3083  * @param[in] action_flags
3084  *   Holds the actions detected until now.
3085  * @param[in] attr
3086  *   Pointer to flow attributes
3087  * @param[out] error
3088  *   Pointer to error structure.
3089  *
3090  * @return
3091  *   0 on success, a negative errno value otherwise and rte_errno is set.
3092  */
3093 static int
3094 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3095                              const struct rte_flow_action *action,
3096                              uint64_t action_flags,
3097                              const struct rte_flow_attr *attr,
3098                              struct rte_flow_error *error)
3099 {
3100         struct mlx5_priv *priv = dev->data->dev_private;
3101         struct mlx5_dev_config *config = &priv->config;
3102         const struct rte_flow_action_mark *mark = action->conf;
3103         int ret;
3104
3105         if (is_tunnel_offload_active(dev))
3106                 return rte_flow_error_set(error, ENOTSUP,
3107                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3108                                           "no mark action "
3109                                           "if tunnel offload active");
3110         /* Fall back if no extended metadata register support. */
3111         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3112                 return mlx5_flow_validate_action_mark(action, action_flags,
3113                                                       attr, error);
3114         /* Extensive metadata mode requires registers. */
3115         if (!mlx5_flow_ext_mreg_supported(dev))
3116                 return rte_flow_error_set(error, ENOTSUP,
3117                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3118                                           "no metadata registers "
3119                                           "to support mark action");
3120         if (!priv->sh->dv_mark_mask)
3121                 return rte_flow_error_set(error, ENOTSUP,
3122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3123                                           "extended metadata register"
3124                                           " isn't available");
3125         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3126         if (ret < 0)
3127                 return ret;
3128         MLX5_ASSERT(ret > 0);
3129         if (!mark)
3130                 return rte_flow_error_set(error, EINVAL,
3131                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3132                                           "configuration cannot be null");
3133         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3134                 return rte_flow_error_set(error, EINVAL,
3135                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3136                                           &mark->id,
3137                                           "mark id exceeds the limit");
3138         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3139                 return rte_flow_error_set(error, EINVAL,
3140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3141                                           "can't flag and mark in same flow");
3142         if (action_flags & MLX5_FLOW_ACTION_MARK)
3143                 return rte_flow_error_set(error, EINVAL,
3144                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3145                                           "can't have 2 mark actions in same"
3146                                           " flow");
3147         return 0;
3148 }
3149
3150 /**
3151  * Validate SET_META action.
3152  *
3153  * @param[in] dev
3154  *   Pointer to the rte_eth_dev structure.
3155  * @param[in] action
3156  *   Pointer to the action structure.
3157  * @param[in] action_flags
3158  *   Holds the actions detected until now.
3159  * @param[in] attr
3160  *   Pointer to flow attributes
3161  * @param[out] error
3162  *   Pointer to error structure.
3163  *
3164  * @return
3165  *   0 on success, a negative errno value otherwise and rte_errno is set.
3166  */
3167 static int
3168 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3169                                  const struct rte_flow_action *action,
3170                                  uint64_t action_flags __rte_unused,
3171                                  const struct rte_flow_attr *attr,
3172                                  struct rte_flow_error *error)
3173 {
3174         const struct rte_flow_action_set_meta *conf;
3175         uint32_t nic_mask = UINT32_MAX;
3176         int reg;
3177
3178         if (!mlx5_flow_ext_mreg_supported(dev))
3179                 return rte_flow_error_set(error, ENOTSUP,
3180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3181                                           "extended metadata register"
3182                                           " isn't supported");
3183         reg = flow_dv_get_metadata_reg(dev, attr, error);
3184         if (reg < 0)
3185                 return reg;
3186         if (reg == REG_NON)
3187                 return rte_flow_error_set(error, ENOTSUP,
3188                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3189                                           "unavalable extended metadata register");
3190         if (reg != REG_A && reg != REG_B) {
3191                 struct mlx5_priv *priv = dev->data->dev_private;
3192
3193                 nic_mask = priv->sh->dv_meta_mask;
3194         }
3195         if (!(action->conf))
3196                 return rte_flow_error_set(error, EINVAL,
3197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3198                                           "configuration cannot be null");
3199         conf = (const struct rte_flow_action_set_meta *)action->conf;
3200         if (!conf->mask)
3201                 return rte_flow_error_set(error, EINVAL,
3202                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3203                                           "zero mask doesn't have any effect");
3204         if (conf->mask & ~nic_mask)
3205                 return rte_flow_error_set(error, EINVAL,
3206                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3207                                           "meta data must be within reg C0");
3208         return 0;
3209 }
3210
3211 /**
3212  * Validate SET_TAG action.
3213  *
3214  * @param[in] dev
3215  *   Pointer to the rte_eth_dev structure.
3216  * @param[in] action
3217  *   Pointer to the action structure.
3218  * @param[in] action_flags
3219  *   Holds the actions detected until now.
3220  * @param[in] attr
3221  *   Pointer to flow attributes
3222  * @param[out] error
3223  *   Pointer to error structure.
3224  *
3225  * @return
3226  *   0 on success, a negative errno value otherwise and rte_errno is set.
3227  */
3228 static int
3229 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3230                                 const struct rte_flow_action *action,
3231                                 uint64_t action_flags,
3232                                 const struct rte_flow_attr *attr,
3233                                 struct rte_flow_error *error)
3234 {
3235         const struct rte_flow_action_set_tag *conf;
3236         const uint64_t terminal_action_flags =
3237                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3238                 MLX5_FLOW_ACTION_RSS;
3239         int ret;
3240
3241         if (!mlx5_flow_ext_mreg_supported(dev))
3242                 return rte_flow_error_set(error, ENOTSUP,
3243                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3244                                           "extensive metadata register"
3245                                           " isn't supported");
3246         if (!(action->conf))
3247                 return rte_flow_error_set(error, EINVAL,
3248                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3249                                           "configuration cannot be null");
3250         conf = (const struct rte_flow_action_set_tag *)action->conf;
3251         if (!conf->mask)
3252                 return rte_flow_error_set(error, EINVAL,
3253                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3254                                           "zero mask doesn't have any effect");
3255         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3256         if (ret < 0)
3257                 return ret;
3258         if (!attr->transfer && attr->ingress &&
3259             (action_flags & terminal_action_flags))
3260                 return rte_flow_error_set(error, EINVAL,
3261                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3262                                           "set_tag has no effect"
3263                                           " with terminal actions");
3264         return 0;
3265 }
3266
3267 /**
3268  * Check if action counter is shared by either old or new mechanism.
3269  *
3270  * @param[in] action
3271  *   Pointer to the action structure.
3272  *
3273  * @return
3274  *   True when counter is shared, false otherwise.
3275  */
3276 static inline bool
3277 is_shared_action_count(const struct rte_flow_action *action)
3278 {
3279         const struct rte_flow_action_count *count =
3280                         (const struct rte_flow_action_count *)action->conf;
3281
3282         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3283                 return true;
3284         return !!(count && count->shared);
3285 }
3286
3287 /**
3288  * Validate count action.
3289  *
3290  * @param[in] dev
3291  *   Pointer to rte_eth_dev structure.
3292  * @param[in] shared
3293  *   Indicator if action is shared.
3294  * @param[in] action_flags
3295  *   Holds the actions detected until now.
3296  * @param[out] error
3297  *   Pointer to error structure.
3298  *
3299  * @return
3300  *   0 on success, a negative errno value otherwise and rte_errno is set.
3301  */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
			      uint64_t action_flags,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* DevX must be enabled to back flow counter objects. */
	if (!priv->config.devx)
		goto notsup_err;
	if (action_flags & MLX5_FLOW_ACTION_COUNT)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "duplicate count actions set");
	if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
	    !priv->sh->flow_hit_aso_en)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "old age and shared count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
	/*
	 * Without DevX counter support the "return 0" above is compiled out
	 * and control deliberately falls through to the error below.
	 */
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
3330
3331 /**
3332  * Validate the L2 encap action.
3333  *
3334  * @param[in] dev
3335  *   Pointer to the rte_eth_dev structure.
3336  * @param[in] action_flags
3337  *   Holds the actions detected until now.
3338  * @param[in] action
3339  *   Pointer to the action structure.
3340  * @param[in] attr
3341  *   Pointer to flow attributes.
3342  * @param[out] error
3343  *   Pointer to error structure.
3344  *
3345  * @return
3346  *   0 on success, a negative errno value otherwise and rte_errno is set.
3347  */
3348 static int
3349 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3350                                  uint64_t action_flags,
3351                                  const struct rte_flow_action *action,
3352                                  const struct rte_flow_attr *attr,
3353                                  struct rte_flow_error *error)
3354 {
3355         const struct mlx5_priv *priv = dev->data->dev_private;
3356
3357         if (!(action->conf))
3358                 return rte_flow_error_set(error, EINVAL,
3359                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3360                                           "configuration cannot be null");
3361         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3362                 return rte_flow_error_set(error, EINVAL,
3363                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3364                                           "can only have a single encap action "
3365                                           "in a flow");
3366         if (!attr->transfer && priv->representor)
3367                 return rte_flow_error_set(error, ENOTSUP,
3368                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3369                                           "encap action for VF representor "
3370                                           "not supported on NIC table");
3371         return 0;
3372 }
3373
/**
 * Validate a decap action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_decap(struct rte_eth_dev *dev,
			      uint64_t action_flags,
			      const struct rte_flow_action *action,
			      const uint64_t item_flags,
			      const struct rte_flow_attr *attr,
			      struct rte_flow_error *error)
{
	const struct mlx5_priv *priv = dev->data->dev_private;

	/*
	 * Decap was turned off (decap_en == 0) while the HCA reports that
	 * FCS scatter cannot be combined with decap — reject the action.
	 */
	if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
	    !priv->config.decap_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "decap is not enabled");
	/* At most one decap, and decap may not follow an encap action. */
	if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  action_flags &
					  MLX5_FLOW_ACTION_DECAP ? "can only "
					  "have a single decap action" : "decap "
					  "after encap is not supported");
	/* Header-modify actions must come after the decap, not before. */
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL,
					  "decap action not supported for "
					  "egress");
	/* On a VF representor, decap is only valid with transfer (FDB). */
	if (!attr->transfer && priv->representor)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "decap action for VF representor "
					  "not supported on NIC table");
	/* VXLAN decap requires a VXLAN item in the pattern. */
	if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
	    !(item_flags & MLX5_FLOW_LAYER_VXLAN))
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"VXLAN item should be present for VXLAN decap");
	return 0;
}
3438
/* Zero-length raw decap descriptor; NOTE(review): its users are outside this
 * chunk — presumably substituted where only the decap action type matters.
 */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3440
/**
 * Validate the raw encap and decap actions.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] decap
 *   Pointer to the decap action.
 * @param[in] encap
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[in/out] action_flags
 *   Holds the actions detected until now.
 * @param[out] actions_n
 *   pointer to the number of actions counter.
 * @param[in] action
 *   Pointer to the action structure.
 * @param[in] item_flags
 *   Holds the items detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap_decap
	(struct rte_eth_dev *dev,
	 const struct rte_flow_action_raw_decap *decap,
	 const struct rte_flow_action_raw_encap *encap,
	 const struct rte_flow_attr *attr, uint64_t *action_flags,
	 int *actions_n, const struct rte_flow_action *action,
	 uint64_t item_flags, struct rte_flow_error *error)
{
	const struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	if (encap && (!encap->size || !encap->data))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "raw encap data cannot be empty");
	if (decap && encap) {
		/*
		 * Classify the decap+encap pair by comparing each data size
		 * against MLX5_ENCAPSULATION_DECISION_SIZE: the "small" side
		 * is dropped (set to NULL) so the pair collapses into a
		 * single L3 encap or L3 decap; two "large" sides stay as two
		 * independent L2 actions; two "small" sides are unsupported.
		 */
		if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
		    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 encap. */
			decap = NULL;
		else if (encap->size <=
			   MLX5_ENCAPSULATION_DECISION_SIZE &&
			   decap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE)
			/* L3 decap. */
			encap = NULL;
		else if (encap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE &&
			   decap->size >
			   MLX5_ENCAPSULATION_DECISION_SIZE)
			/* 2 L2 actions: encap and decap. */
			;
		else
			return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "unsupported too small "
				"raw decap and too small raw "
				"encap combination");
	}
	if (decap) {
		/* Delegate the generic decap checks, then account for it. */
		ret = flow_dv_validate_action_decap(dev, *action_flags, action,
						    item_flags, attr, error);
		if (ret < 0)
			return ret;
		*action_flags |= MLX5_FLOW_ACTION_DECAP;
		++(*actions_n);
	}
	if (encap) {
		/* A standalone raw encap must exceed the L2 decision size. */
		if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "small raw encap size");
		if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "more than one encap action");
		/* On a VF representor, encap is only valid with transfer. */
		if (!attr->transfer && priv->representor)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "encap action for VF representor "
					 "not supported on NIC table");
		*action_flags |= MLX5_FLOW_ACTION_ENCAP;
		++(*actions_n);
	}
	return 0;
}
3537
/**
 * Validate the ASO CT action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
			       uint64_t action_flags,
			       uint64_t item_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	RTE_SET_USED(dev);

	/* Reject root table: NIC group 0 without transfer. */
	if (attr->group == 0 && !attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "Only support non-root table");
	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "CT cannot follow a fate action");
	/* Meter/age are also ASO-backed; only one ASO action per rule. */
	if ((action_flags & MLX5_FLOW_ACTION_METER) ||
	    (action_flags & MLX5_FLOW_ACTION_AGE))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Only one ASO action is supported");
	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Encap cannot exist before CT");
	/* Connection tracking requires matching on outer TCP. */
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "Not a outer TCP packet");
	return 0;
}
3588
3589 int
3590 flow_dv_encap_decap_match_cb(void *tool_ctx __rte_unused,
3591                              struct mlx5_list_entry *entry, void *cb_ctx)
3592 {
3593         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3594         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3595         struct mlx5_flow_dv_encap_decap_resource *resource;
3596
3597         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3598                                 entry);
3599         if (resource->reformat_type == ctx_resource->reformat_type &&
3600             resource->ft_type == ctx_resource->ft_type &&
3601             resource->flags == ctx_resource->flags &&
3602             resource->size == ctx_resource->size &&
3603             !memcmp((const void *)resource->buf,
3604                     (const void *)ctx_resource->buf,
3605                     resource->size))
3606                 return 0;
3607         return -1;
3608 }
3609
/*
 * List create callback for encap/decap resources: allocate an ipool slot,
 * copy the reference resource into it and create the DR packet-reformat
 * action in the proper domain. Returns the new list entry or NULL on error
 * (with ctx->error set).
 */
struct mlx5_list_entry *
flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5dv_dr_domain *domain;
	struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
	struct mlx5_flow_dv_encap_decap_resource *resource;
	uint32_t idx;
	int ret;

	/* Select the DR domain matching the flow table type. */
	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Register new encap/decap resource. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
	if (!resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	*resource = *ctx_resource;
	resource->idx = idx;
	ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
							      resource,
							     &resource->action);
	if (ret) {
		/* Creation failed: give the ipool slot back before bailing. */
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create action");
		return NULL;
	}

	return &resource->entry;
}
3650
3651 struct mlx5_list_entry *
3652 flow_dv_encap_decap_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
3653                              void *cb_ctx)
3654 {
3655         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3656         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3657         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
3658         uint32_t idx;
3659
3660         cache_resource = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
3661                                            &idx);
3662         if (!cache_resource) {
3663                 rte_flow_error_set(ctx->error, ENOMEM,
3664                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3665                                    "cannot allocate resource memory");
3666                 return NULL;
3667         }
3668         memcpy(cache_resource, oentry, sizeof(*cache_resource));
3669         cache_resource->idx = idx;
3670         return &cache_resource->entry;
3671 }
3672
3673 void
3674 flow_dv_encap_decap_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3675 {
3676         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3677         struct mlx5_flow_dv_encap_decap_resource *res =
3678                                        container_of(entry, typeof(*res), entry);
3679
3680         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
3681 }
3682
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_list_entry *entry;
	/* Packed 32-bit hash seed describing the reformat resource type. */
	union {
		struct {
			uint32_t ft_type:8;
			uint32_t refmt_type:8;
			/*
			 * Header reformat actions can be shared between
			 * non-root tables. One bit to indicate non-root
			 * table or not.
			 */
			uint32_t is_root:1;
			uint32_t reserve:15;
		};
		uint32_t v32;
	} encap_decap_key = {
		{
			.ft_type = resource->ft_type,
			.refmt_type = resource->reformat_type,
			/*
			 * NOTE(review): despite its name the bit is set when
			 * group != 0, i.e. for NON-root tables — confirm
			 * before renaming either side.
			 */
			.is_root = !!dev_flow->dv.group,
			.reserve = 0,
		}
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};
	uint64_t key64;

	/* flags is non-zero only for root-table (group 0) flows. */
	resource->flags = dev_flow->dv.group ? 0 : 1;
	/* Hash key: type word first, then the reformat data buffer if any. */
	key64 =  __rte_raw_cksum(&encap_decap_key.v32,
				 sizeof(encap_decap_key.v32), 0);
	if (resource->reformat_type !=
	    MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
	    resource->size)
		key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
	/* Reuse a matching cached resource or create one via the hlist cbs. */
	entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
	if (!entry)
		return -rte_errno;
	/* Point the dev_flow at the (possibly pre-existing) resource. */
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->dv.encap_decap = resource;
	dev_flow->handle->dvh.rix_encap_decap = resource->idx;
	return 0;
}
3750
3751 /**
3752  * Find existing table jump resource or create and register a new one.
3753  *
3754  * @param[in, out] dev
3755  *   Pointer to rte_eth_dev structure.
3756  * @param[in, out] tbl
3757  *   Pointer to flow table resource.
3758  * @parm[in, out] dev_flow
3759  *   Pointer to the dev_flow.
3760  * @param[out] error
3761  *   pointer to error structure.
3762  *
3763  * @return
3764  *   0 on success otherwise -errno and errno is set.
3765  */
3766 static int
3767 flow_dv_jump_tbl_resource_register
3768                         (struct rte_eth_dev *dev __rte_unused,
3769                          struct mlx5_flow_tbl_resource *tbl,
3770                          struct mlx5_flow *dev_flow,
3771                          struct rte_flow_error *error __rte_unused)
3772 {
3773         struct mlx5_flow_tbl_data_entry *tbl_data =
3774                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3775
3776         MLX5_ASSERT(tbl);
3777         MLX5_ASSERT(tbl_data->jump.action);
3778         dev_flow->handle->rix_jump = tbl_data->idx;
3779         dev_flow->dv.jump = &tbl_data->jump;
3780         return 0;
3781 }
3782
3783 int
3784 flow_dv_port_id_match_cb(void *tool_ctx __rte_unused,
3785                          struct mlx5_list_entry *entry, void *cb_ctx)
3786 {
3787         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3788         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3789         struct mlx5_flow_dv_port_id_action_resource *res =
3790                                        container_of(entry, typeof(*res), entry);
3791
3792         return ref->port_id != res->port_id;
3793 }
3794
/*
 * List create callback for port_id actions: allocate an ipool slot and
 * create the DR dest-port action (FDB domain only). Returns the new list
 * entry or NULL on error (with ctx->error set).
 */
struct mlx5_list_entry *
flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
	struct mlx5_flow_dv_port_id_action_resource *resource;
	uint32_t idx;
	int ret;

	/* Register new port id action resource. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
	if (!resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate port_id action memory");
		return NULL;
	}
	*resource = *ref;
	ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
							ref->port_id,
							&resource->action);
	if (ret) {
		/* Creation failed: release the ipool slot before bailing. */
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create action");
		return NULL;
	}
	resource->idx = idx;
	return &resource->entry;
}
3827
3828 struct mlx5_list_entry *
3829 flow_dv_port_id_clone_cb(void *tool_ctx,
3830                          struct mlx5_list_entry *entry __rte_unused,
3831                          void *cb_ctx)
3832 {
3833         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3834         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3835         struct mlx5_flow_dv_port_id_action_resource *resource;
3836         uint32_t idx;
3837
3838         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3839         if (!resource) {
3840                 rte_flow_error_set(ctx->error, ENOMEM,
3841                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3842                                    "cannot allocate port_id action memory");
3843                 return NULL;
3844         }
3845         memcpy(resource, entry, sizeof(*resource));
3846         resource->idx = idx;
3847         return &resource->entry;
3848 }
3849
3850 void
3851 flow_dv_port_id_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3852 {
3853         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3854         struct mlx5_flow_dv_port_id_action_resource *resource =
3855                                   container_of(entry, typeof(*resource), entry);
3856
3857         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3858 }
3859
3860 /**
3861  * Find existing table port ID resource or create and register a new one.
3862  *
3863  * @param[in, out] dev
3864  *   Pointer to rte_eth_dev structure.
3865  * @param[in, out] ref
3866  *   Pointer to port ID action resource reference.
3867  * @parm[in, out] dev_flow
3868  *   Pointer to the dev_flow.
3869  * @param[out] error
3870  *   pointer to error structure.
3871  *
3872  * @return
3873  *   0 on success otherwise -errno and errno is set.
3874  */
3875 static int
3876 flow_dv_port_id_action_resource_register
3877                         (struct rte_eth_dev *dev,
3878                          struct mlx5_flow_dv_port_id_action_resource *ref,
3879                          struct mlx5_flow *dev_flow,
3880                          struct rte_flow_error *error)
3881 {
3882         struct mlx5_priv *priv = dev->data->dev_private;
3883         struct mlx5_list_entry *entry;
3884         struct mlx5_flow_dv_port_id_action_resource *resource;
3885         struct mlx5_flow_cb_ctx ctx = {
3886                 .error = error,
3887                 .data = ref,
3888         };
3889
3890         entry = mlx5_list_register(priv->sh->port_id_action_list, &ctx);
3891         if (!entry)
3892                 return -rte_errno;
3893         resource = container_of(entry, typeof(*resource), entry);
3894         dev_flow->dv.port_id_action = resource;
3895         dev_flow->handle->rix_port_id_action = resource->idx;
3896         return 0;
3897 }
3898
3899 int
3900 flow_dv_push_vlan_match_cb(void *tool_ctx __rte_unused,
3901                            struct mlx5_list_entry *entry, void *cb_ctx)
3902 {
3903         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3904         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3905         struct mlx5_flow_dv_push_vlan_action_resource *res =
3906                                        container_of(entry, typeof(*res), entry);
3907
3908         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3909 }
3910
/*
 * List create callback for push-VLAN actions: allocate an ipool slot,
 * pick the DR domain matching the table type and create the push-VLAN
 * action. Returns the new list entry or NULL on error (ctx->error set).
 */
struct mlx5_list_entry *
flow_dv_push_vlan_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
	struct mlx5_flow_dv_push_vlan_action_resource *resource;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx;
	int ret;

	/* Register new push-VLAN action resource. */
	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
	if (!resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate push_vlan action memory");
		return NULL;
	}
	*resource = *ref;
	/* Select the DR domain matching the flow table type. */
	if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
							&resource->action);
	if (ret) {
		/* Creation failed: release the ipool slot before bailing. */
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create push vlan action");
		return NULL;
	}
	resource->idx = idx;
	return &resource->entry;
}
3949
3950 struct mlx5_list_entry *
3951 flow_dv_push_vlan_clone_cb(void *tool_ctx,
3952                            struct mlx5_list_entry *entry __rte_unused,
3953                            void *cb_ctx)
3954 {
3955         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3956         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3957         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3958         uint32_t idx;
3959
3960         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3961         if (!resource) {
3962                 rte_flow_error_set(ctx->error, ENOMEM,
3963                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3964                                    "cannot allocate push_vlan action memory");
3965                 return NULL;
3966         }
3967         memcpy(resource, entry, sizeof(*resource));
3968         resource->idx = idx;
3969         return &resource->entry;
3970 }
3971
3972 void
3973 flow_dv_push_vlan_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3974 {
3975         struct mlx5_dev_ctx_shared *sh = tool_ctx;
3976         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3977                                   container_of(entry, typeof(*resource), entry);
3978
3979         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3980 }
3981
3982 /**
3983  * Find existing push vlan resource or create and register a new one.
3984  *
3985  * @param [in, out] dev
3986  *   Pointer to rte_eth_dev structure.
3987  * @param[in, out] ref
3988  *   Pointer to port ID action resource reference.
3989  * @parm[in, out] dev_flow
3990  *   Pointer to the dev_flow.
3991  * @param[out] error
3992  *   pointer to error structure.
3993  *
3994  * @return
3995  *   0 on success otherwise -errno and errno is set.
3996  */
3997 static int
3998 flow_dv_push_vlan_action_resource_register
3999                        (struct rte_eth_dev *dev,
4000                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
4001                         struct mlx5_flow *dev_flow,
4002                         struct rte_flow_error *error)
4003 {
4004         struct mlx5_priv *priv = dev->data->dev_private;
4005         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4006         struct mlx5_list_entry *entry;
4007         struct mlx5_flow_cb_ctx ctx = {
4008                 .error = error,
4009                 .data = ref,
4010         };
4011
4012         entry = mlx5_list_register(priv->sh->push_vlan_action_list, &ctx);
4013         if (!entry)
4014                 return -rte_errno;
4015         resource = container_of(entry, typeof(*resource), entry);
4016
4017         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4018         dev_flow->dv.push_vlan_res = resource;
4019         return 0;
4020 }
4021
4022 /**
4023  * Get the size of specific rte_flow_item_type hdr size
4024  *
4025  * @param[in] item_type
4026  *   Tested rte_flow_item_type.
4027  *
4028  * @return
4029  *   sizeof struct item_type, 0 if void or irrelevant.
4030  */
4031 static size_t
4032 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4033 {
4034         size_t retval;
4035
4036         switch (item_type) {
4037         case RTE_FLOW_ITEM_TYPE_ETH:
4038                 retval = sizeof(struct rte_ether_hdr);
4039                 break;
4040         case RTE_FLOW_ITEM_TYPE_VLAN:
4041                 retval = sizeof(struct rte_vlan_hdr);
4042                 break;
4043         case RTE_FLOW_ITEM_TYPE_IPV4:
4044                 retval = sizeof(struct rte_ipv4_hdr);
4045                 break;
4046         case RTE_FLOW_ITEM_TYPE_IPV6:
4047                 retval = sizeof(struct rte_ipv6_hdr);
4048                 break;
4049         case RTE_FLOW_ITEM_TYPE_UDP:
4050                 retval = sizeof(struct rte_udp_hdr);
4051                 break;
4052         case RTE_FLOW_ITEM_TYPE_TCP:
4053                 retval = sizeof(struct rte_tcp_hdr);
4054                 break;
4055         case RTE_FLOW_ITEM_TYPE_VXLAN:
4056         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4057                 retval = sizeof(struct rte_vxlan_hdr);
4058                 break;
4059         case RTE_FLOW_ITEM_TYPE_GRE:
4060         case RTE_FLOW_ITEM_TYPE_NVGRE:
4061                 retval = sizeof(struct rte_gre_hdr);
4062                 break;
4063         case RTE_FLOW_ITEM_TYPE_MPLS:
4064                 retval = sizeof(struct rte_mpls_hdr);
4065                 break;
4066         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4067         default:
4068                 retval = 0;
4069                 break;
4070         }
4071         return retval;
4072 }
4073
/* Default header field values used when building encap header buffers. */
#define MLX5_ENCAP_IPV4_VERSION		0x40
#define MLX5_ENCAP_IPV4_IHL_MIN		0x05
#define MLX5_ENCAP_IPV4_TTL_DEF		0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff
#define MLX5_ENCAP_VXLAN_FLAGS		0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04
4081
4082 /**
4083  * Convert the encap action data from list of rte_flow_item to raw buffer
4084  *
4085  * @param[in] items
4086  *   Pointer to rte_flow_item objects list.
4087  * @param[out] buf
4088  *   Pointer to the output buffer.
4089  * @param[out] size
4090  *   Pointer to the output buffer size.
4091  * @param[out] error
4092  *   Pointer to the error structure.
4093  *
4094  * @return
4095  *   0 on success, a negative errno value otherwise and rte_errno is set.
4096  */
4097 static int
4098 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4099                            size_t *size, struct rte_flow_error *error)
4100 {
4101         struct rte_ether_hdr *eth = NULL;
4102         struct rte_vlan_hdr *vlan = NULL;
4103         struct rte_ipv4_hdr *ipv4 = NULL;
4104         struct rte_ipv6_hdr *ipv6 = NULL;
4105         struct rte_udp_hdr *udp = NULL;
4106         struct rte_vxlan_hdr *vxlan = NULL;
4107         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4108         struct rte_gre_hdr *gre = NULL;
4109         size_t len;
4110         size_t temp_size = 0;
4111
4112         if (!items)
4113                 return rte_flow_error_set(error, EINVAL,
4114                                           RTE_FLOW_ERROR_TYPE_ACTION,
4115                                           NULL, "invalid empty data");
4116         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4117                 len = flow_dv_get_item_hdr_len(items->type);
4118                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4119                         return rte_flow_error_set(error, EINVAL,
4120                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4121                                                   (void *)items->type,
4122                                                   "items total size is too big"
4123                                                   " for encap action");
4124                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4125                 switch (items->type) {
4126                 case RTE_FLOW_ITEM_TYPE_ETH:
4127                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4128                         break;
4129                 case RTE_FLOW_ITEM_TYPE_VLAN:
4130                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4131                         if (!eth)
4132                                 return rte_flow_error_set(error, EINVAL,
4133                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4134                                                 (void *)items->type,
4135                                                 "eth header not found");
4136                         if (!eth->ether_type)
4137                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4138                         break;
4139                 case RTE_FLOW_ITEM_TYPE_IPV4:
4140                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4141                         if (!vlan && !eth)
4142                                 return rte_flow_error_set(error, EINVAL,
4143                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4144                                                 (void *)items->type,
4145                                                 "neither eth nor vlan"
4146                                                 " header found");
4147                         if (vlan && !vlan->eth_proto)
4148                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4149                         else if (eth && !eth->ether_type)
4150                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4151                         if (!ipv4->version_ihl)
4152                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4153                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4154                         if (!ipv4->time_to_live)
4155                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4156                         break;
4157                 case RTE_FLOW_ITEM_TYPE_IPV6:
4158                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4159                         if (!vlan && !eth)
4160                                 return rte_flow_error_set(error, EINVAL,
4161                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4162                                                 (void *)items->type,
4163                                                 "neither eth nor vlan"
4164                                                 " header found");
4165                         if (vlan && !vlan->eth_proto)
4166                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4167                         else if (eth && !eth->ether_type)
4168                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4169                         if (!ipv6->vtc_flow)
4170                                 ipv6->vtc_flow =
4171                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4172                         if (!ipv6->hop_limits)
4173                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4174                         break;
4175                 case RTE_FLOW_ITEM_TYPE_UDP:
4176                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4177                         if (!ipv4 && !ipv6)
4178                                 return rte_flow_error_set(error, EINVAL,
4179                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4180                                                 (void *)items->type,
4181                                                 "ip header not found");
4182                         if (ipv4 && !ipv4->next_proto_id)
4183                                 ipv4->next_proto_id = IPPROTO_UDP;
4184                         else if (ipv6 && !ipv6->proto)
4185                                 ipv6->proto = IPPROTO_UDP;
4186                         break;
4187                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4188                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4189                         if (!udp)
4190                                 return rte_flow_error_set(error, EINVAL,
4191                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4192                                                 (void *)items->type,
4193                                                 "udp header not found");
4194                         if (!udp->dst_port)
4195                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4196                         if (!vxlan->vx_flags)
4197                                 vxlan->vx_flags =
4198                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4199                         break;
4200                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4201                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4202                         if (!udp)
4203                                 return rte_flow_error_set(error, EINVAL,
4204                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4205                                                 (void *)items->type,
4206                                                 "udp header not found");
4207                         if (!vxlan_gpe->proto)
4208                                 return rte_flow_error_set(error, EINVAL,
4209                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4210                                                 (void *)items->type,
4211                                                 "next protocol not found");
4212                         if (!udp->dst_port)
4213                                 udp->dst_port =
4214                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4215                         if (!vxlan_gpe->vx_flags)
4216                                 vxlan_gpe->vx_flags =
4217                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4218                         break;
4219                 case RTE_FLOW_ITEM_TYPE_GRE:
4220                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4221                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4222                         if (!gre->proto)
4223                                 return rte_flow_error_set(error, EINVAL,
4224                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4225                                                 (void *)items->type,
4226                                                 "next protocol not found");
4227                         if (!ipv4 && !ipv6)
4228                                 return rte_flow_error_set(error, EINVAL,
4229                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4230                                                 (void *)items->type,
4231                                                 "ip header not found");
4232                         if (ipv4 && !ipv4->next_proto_id)
4233                                 ipv4->next_proto_id = IPPROTO_GRE;
4234                         else if (ipv6 && !ipv6->proto)
4235                                 ipv6->proto = IPPROTO_GRE;
4236                         break;
4237                 case RTE_FLOW_ITEM_TYPE_VOID:
4238                         break;
4239                 default:
4240                         return rte_flow_error_set(error, EINVAL,
4241                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4242                                                   (void *)items->type,
4243                                                   "unsupported item type");
4244                         break;
4245                 }
4246                 temp_size += len;
4247         }
4248         *size = temp_size;
4249         return 0;
4250 }
4251
4252 static int
4253 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4254 {
4255         struct rte_ether_hdr *eth = NULL;
4256         struct rte_vlan_hdr *vlan = NULL;
4257         struct rte_ipv6_hdr *ipv6 = NULL;
4258         struct rte_udp_hdr *udp = NULL;
4259         char *next_hdr;
4260         uint16_t proto;
4261
4262         eth = (struct rte_ether_hdr *)data;
4263         next_hdr = (char *)(eth + 1);
4264         proto = RTE_BE16(eth->ether_type);
4265
4266         /* VLAN skipping */
4267         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4268                 vlan = (struct rte_vlan_hdr *)next_hdr;
4269                 proto = RTE_BE16(vlan->eth_proto);
4270                 next_hdr += sizeof(struct rte_vlan_hdr);
4271         }
4272
4273         /* HW calculates IPv4 csum. no need to proceed */
4274         if (proto == RTE_ETHER_TYPE_IPV4)
4275                 return 0;
4276
4277         /* non IPv4/IPv6 header. not supported */
4278         if (proto != RTE_ETHER_TYPE_IPV6) {
4279                 return rte_flow_error_set(error, ENOTSUP,
4280                                           RTE_FLOW_ERROR_TYPE_ACTION,
4281                                           NULL, "Cannot offload non IPv4/IPv6");
4282         }
4283
4284         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4285
4286         /* ignore non UDP */
4287         if (ipv6->proto != IPPROTO_UDP)
4288                 return 0;
4289
4290         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4291         udp->dgram_cksum = 0;
4292
4293         return 0;
4294 }
4295
4296 /**
4297  * Convert L2 encap action to DV specification.
4298  *
4299  * @param[in] dev
4300  *   Pointer to rte_eth_dev structure.
4301  * @param[in] action
4302  *   Pointer to action structure.
4303  * @param[in, out] dev_flow
4304  *   Pointer to the mlx5_flow.
4305  * @param[in] transfer
4306  *   Mark if the flow is E-Switch flow.
4307  * @param[out] error
4308  *   Pointer to the error structure.
4309  *
4310  * @return
4311  *   0 on success, a negative errno value otherwise and rte_errno is set.
4312  */
4313 static int
4314 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4315                                const struct rte_flow_action *action,
4316                                struct mlx5_flow *dev_flow,
4317                                uint8_t transfer,
4318                                struct rte_flow_error *error)
4319 {
4320         const struct rte_flow_item *encap_data;
4321         const struct rte_flow_action_raw_encap *raw_encap_data;
4322         struct mlx5_flow_dv_encap_decap_resource res = {
4323                 .reformat_type =
4324                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4325                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4326                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4327         };
4328
4329         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4330                 raw_encap_data =
4331                         (const struct rte_flow_action_raw_encap *)action->conf;
4332                 res.size = raw_encap_data->size;
4333                 memcpy(res.buf, raw_encap_data->data, res.size);
4334         } else {
4335                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4336                         encap_data =
4337                                 ((const struct rte_flow_action_vxlan_encap *)
4338                                                 action->conf)->definition;
4339                 else
4340                         encap_data =
4341                                 ((const struct rte_flow_action_nvgre_encap *)
4342                                                 action->conf)->definition;
4343                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4344                                                &res.size, error))
4345                         return -rte_errno;
4346         }
4347         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4348                 return -rte_errno;
4349         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4350                 return rte_flow_error_set(error, EINVAL,
4351                                           RTE_FLOW_ERROR_TYPE_ACTION,
4352                                           NULL, "can't create L2 encap action");
4353         return 0;
4354 }
4355
4356 /**
4357  * Convert L2 decap action to DV specification.
4358  *
4359  * @param[in] dev
4360  *   Pointer to rte_eth_dev structure.
4361  * @param[in, out] dev_flow
4362  *   Pointer to the mlx5_flow.
4363  * @param[in] transfer
4364  *   Mark if the flow is E-Switch flow.
4365  * @param[out] error
4366  *   Pointer to the error structure.
4367  *
4368  * @return
4369  *   0 on success, a negative errno value otherwise and rte_errno is set.
4370  */
4371 static int
4372 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4373                                struct mlx5_flow *dev_flow,
4374                                uint8_t transfer,
4375                                struct rte_flow_error *error)
4376 {
4377         struct mlx5_flow_dv_encap_decap_resource res = {
4378                 .size = 0,
4379                 .reformat_type =
4380                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4381                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4382                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4383         };
4384
4385         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4386                 return rte_flow_error_set(error, EINVAL,
4387                                           RTE_FLOW_ERROR_TYPE_ACTION,
4388                                           NULL, "can't create L2 decap action");
4389         return 0;
4390 }
4391
4392 /**
4393  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4394  *
4395  * @param[in] dev
4396  *   Pointer to rte_eth_dev structure.
4397  * @param[in] action
4398  *   Pointer to action structure.
4399  * @param[in, out] dev_flow
4400  *   Pointer to the mlx5_flow.
4401  * @param[in] attr
4402  *   Pointer to the flow attributes.
4403  * @param[out] error
4404  *   Pointer to the error structure.
4405  *
4406  * @return
4407  *   0 on success, a negative errno value otherwise and rte_errno is set.
4408  */
4409 static int
4410 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4411                                 const struct rte_flow_action *action,
4412                                 struct mlx5_flow *dev_flow,
4413                                 const struct rte_flow_attr *attr,
4414                                 struct rte_flow_error *error)
4415 {
4416         const struct rte_flow_action_raw_encap *encap_data;
4417         struct mlx5_flow_dv_encap_decap_resource res;
4418
4419         memset(&res, 0, sizeof(res));
4420         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4421         res.size = encap_data->size;
4422         memcpy(res.buf, encap_data->data, res.size);
4423         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4424                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4425                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4426         if (attr->transfer)
4427                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4428         else
4429                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4430                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4431         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4432                 return rte_flow_error_set(error, EINVAL,
4433                                           RTE_FLOW_ERROR_TYPE_ACTION,
4434                                           NULL, "can't create encap action");
4435         return 0;
4436 }
4437
4438 /**
4439  * Create action push VLAN.
4440  *
4441  * @param[in] dev
4442  *   Pointer to rte_eth_dev structure.
4443  * @param[in] attr
4444  *   Pointer to the flow attributes.
4445  * @param[in] vlan
4446  *   Pointer to the vlan to push to the Ethernet header.
4447  * @param[in, out] dev_flow
4448  *   Pointer to the mlx5_flow.
4449  * @param[out] error
4450  *   Pointer to the error structure.
4451  *
4452  * @return
4453  *   0 on success, a negative errno value otherwise and rte_errno is set.
4454  */
4455 static int
4456 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4457                                 const struct rte_flow_attr *attr,
4458                                 const struct rte_vlan_hdr *vlan,
4459                                 struct mlx5_flow *dev_flow,
4460                                 struct rte_flow_error *error)
4461 {
4462         struct mlx5_flow_dv_push_vlan_action_resource res;
4463
4464         memset(&res, 0, sizeof(res));
4465         res.vlan_tag =
4466                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4467                                  vlan->vlan_tci);
4468         if (attr->transfer)
4469                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4470         else
4471                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4472                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4473         return flow_dv_push_vlan_action_resource_register
4474                                             (dev, &res, dev_flow, error);
4475 }
4476
4477 /**
4478  * Validate the modify-header actions.
4479  *
4480  * @param[in] action_flags
4481  *   Holds the actions detected until now.
4482  * @param[in] action
4483  *   Pointer to the modify action.
4484  * @param[out] error
4485  *   Pointer to error structure.
4486  *
4487  * @return
4488  *   0 on success, a negative errno value otherwise and rte_errno is set.
4489  */
4490 static int
4491 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4492                                    const struct rte_flow_action *action,
4493                                    struct rte_flow_error *error)
4494 {
4495         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4496                 return rte_flow_error_set(error, EINVAL,
4497                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4498                                           NULL, "action configuration not set");
4499         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4500                 return rte_flow_error_set(error, EINVAL,
4501                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4502                                           "can't have encap action before"
4503                                           " modify action");
4504         return 0;
4505 }
4506
4507 /**
4508  * Validate the modify-header MAC address actions.
4509  *
4510  * @param[in] action_flags
4511  *   Holds the actions detected until now.
4512  * @param[in] action
4513  *   Pointer to the modify action.
4514  * @param[in] item_flags
4515  *   Holds the items detected.
4516  * @param[out] error
4517  *   Pointer to error structure.
4518  *
4519  * @return
4520  *   0 on success, a negative errno value otherwise and rte_errno is set.
4521  */
4522 static int
4523 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4524                                    const struct rte_flow_action *action,
4525                                    const uint64_t item_flags,
4526                                    struct rte_flow_error *error)
4527 {
4528         int ret = 0;
4529
4530         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4531         if (!ret) {
4532                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4533                         return rte_flow_error_set(error, EINVAL,
4534                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4535                                                   NULL,
4536                                                   "no L2 item in pattern");
4537         }
4538         return ret;
4539 }
4540
4541 /**
4542  * Validate the modify-header IPv4 address actions.
4543  *
4544  * @param[in] action_flags
4545  *   Holds the actions detected until now.
4546  * @param[in] action
4547  *   Pointer to the modify action.
4548  * @param[in] item_flags
4549  *   Holds the items detected.
4550  * @param[out] error
4551  *   Pointer to error structure.
4552  *
4553  * @return
4554  *   0 on success, a negative errno value otherwise and rte_errno is set.
4555  */
4556 static int
4557 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4558                                     const struct rte_flow_action *action,
4559                                     const uint64_t item_flags,
4560                                     struct rte_flow_error *error)
4561 {
4562         int ret = 0;
4563         uint64_t layer;
4564
4565         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4566         if (!ret) {
4567                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4568                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4569                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4570                 if (!(item_flags & layer))
4571                         return rte_flow_error_set(error, EINVAL,
4572                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4573                                                   NULL,
4574                                                   "no ipv4 item in pattern");
4575         }
4576         return ret;
4577 }
4578
4579 /**
4580  * Validate the modify-header IPv6 address actions.
4581  *
4582  * @param[in] action_flags
4583  *   Holds the actions detected until now.
4584  * @param[in] action
4585  *   Pointer to the modify action.
4586  * @param[in] item_flags
4587  *   Holds the items detected.
4588  * @param[out] error
4589  *   Pointer to error structure.
4590  *
4591  * @return
4592  *   0 on success, a negative errno value otherwise and rte_errno is set.
4593  */
4594 static int
4595 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4596                                     const struct rte_flow_action *action,
4597                                     const uint64_t item_flags,
4598                                     struct rte_flow_error *error)
4599 {
4600         int ret = 0;
4601         uint64_t layer;
4602
4603         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4604         if (!ret) {
4605                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4606                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4607                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4608                 if (!(item_flags & layer))
4609                         return rte_flow_error_set(error, EINVAL,
4610                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4611                                                   NULL,
4612                                                   "no ipv6 item in pattern");
4613         }
4614         return ret;
4615 }
4616
4617 /**
4618  * Validate the modify-header TP actions.
4619  *
4620  * @param[in] action_flags
4621  *   Holds the actions detected until now.
4622  * @param[in] action
4623  *   Pointer to the modify action.
4624  * @param[in] item_flags
4625  *   Holds the items detected.
4626  * @param[out] error
4627  *   Pointer to error structure.
4628  *
4629  * @return
4630  *   0 on success, a negative errno value otherwise and rte_errno is set.
4631  */
4632 static int
4633 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4634                                   const struct rte_flow_action *action,
4635                                   const uint64_t item_flags,
4636                                   struct rte_flow_error *error)
4637 {
4638         int ret = 0;
4639         uint64_t layer;
4640
4641         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4642         if (!ret) {
4643                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4644                                  MLX5_FLOW_LAYER_INNER_L4 :
4645                                  MLX5_FLOW_LAYER_OUTER_L4;
4646                 if (!(item_flags & layer))
4647                         return rte_flow_error_set(error, EINVAL,
4648                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4649                                                   NULL, "no transport layer "
4650                                                   "in pattern");
4651         }
4652         return ret;
4653 }
4654
4655 /**
4656  * Validate the modify-header actions of increment/decrement
4657  * TCP Sequence-number.
4658  *
4659  * @param[in] action_flags
4660  *   Holds the actions detected until now.
4661  * @param[in] action
4662  *   Pointer to the modify action.
4663  * @param[in] item_flags
4664  *   Holds the items detected.
4665  * @param[out] error
4666  *   Pointer to error structure.
4667  *
4668  * @return
4669  *   0 on success, a negative errno value otherwise and rte_errno is set.
4670  */
4671 static int
4672 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4673                                        const struct rte_flow_action *action,
4674                                        const uint64_t item_flags,
4675                                        struct rte_flow_error *error)
4676 {
4677         int ret = 0;
4678         uint64_t layer;
4679
4680         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4681         if (!ret) {
4682                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4683                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4684                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4685                 if (!(item_flags & layer))
4686                         return rte_flow_error_set(error, EINVAL,
4687                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4688                                                   NULL, "no TCP item in"
4689                                                   " pattern");
4690                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4691                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4692                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4693                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4694                         return rte_flow_error_set(error, EINVAL,
4695                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4696                                                   NULL,
4697                                                   "cannot decrease and increase"
4698                                                   " TCP sequence number"
4699                                                   " at the same time");
4700         }
4701         return ret;
4702 }
4703
4704 /**
4705  * Validate the modify-header actions of increment/decrement
4706  * TCP Acknowledgment number.
4707  *
4708  * @param[in] action_flags
4709  *   Holds the actions detected until now.
4710  * @param[in] action
4711  *   Pointer to the modify action.
4712  * @param[in] item_flags
4713  *   Holds the items detected.
4714  * @param[out] error
4715  *   Pointer to error structure.
4716  *
4717  * @return
4718  *   0 on success, a negative errno value otherwise and rte_errno is set.
4719  */
4720 static int
4721 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4722                                        const struct rte_flow_action *action,
4723                                        const uint64_t item_flags,
4724                                        struct rte_flow_error *error)
4725 {
4726         int ret = 0;
4727         uint64_t layer;
4728
4729         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4730         if (!ret) {
4731                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4732                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4733                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4734                 if (!(item_flags & layer))
4735                         return rte_flow_error_set(error, EINVAL,
4736                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4737                                                   NULL, "no TCP item in"
4738                                                   " pattern");
4739                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4740                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4741                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4742                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4743                         return rte_flow_error_set(error, EINVAL,
4744                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4745                                                   NULL,
4746                                                   "cannot decrease and increase"
4747                                                   " TCP acknowledgment number"
4748                                                   " at the same time");
4749         }
4750         return ret;
4751 }
4752
4753 /**
4754  * Validate the modify-header TTL actions.
4755  *
4756  * @param[in] action_flags
4757  *   Holds the actions detected until now.
4758  * @param[in] action
4759  *   Pointer to the modify action.
4760  * @param[in] item_flags
4761  *   Holds the items detected.
4762  * @param[out] error
4763  *   Pointer to error structure.
4764  *
4765  * @return
4766  *   0 on success, a negative errno value otherwise and rte_errno is set.
4767  */
4768 static int
4769 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4770                                    const struct rte_flow_action *action,
4771                                    const uint64_t item_flags,
4772                                    struct rte_flow_error *error)
4773 {
4774         int ret = 0;
4775         uint64_t layer;
4776
4777         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4778         if (!ret) {
4779                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4780                                  MLX5_FLOW_LAYER_INNER_L3 :
4781                                  MLX5_FLOW_LAYER_OUTER_L3;
4782                 if (!(item_flags & layer))
4783                         return rte_flow_error_set(error, EINVAL,
4784                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4785                                                   NULL,
4786                                                   "no IP protocol in pattern");
4787         }
4788         return ret;
4789 }
4790
/**
 * Validate the generic modify field actions.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the modify action.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Number of header fields to modify (0 or more) on success,
 *   a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
				   const uint64_t action_flags,
				   const struct rte_flow_action *action,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error)
{
	int ret = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_action_modify_field *action_modify_field =
		action->conf;
	/* Width in bits of src/dst fields under the current configuration. */
	uint32_t dst_width = mlx5_flow_item_field_width(config,
				action_modify_field->dst.field);
	uint32_t src_width = mlx5_flow_item_field_width(config,
				action_modify_field->src.field);

	/* Generic modify-header constraints apply first. */
	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
	if (ret)
		return ret;

	if (action_modify_field->width == 0)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"no bits are requested to be modified");
	else if (action_modify_field->width > dst_width ||
		 action_modify_field->width > src_width)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"cannot modify more bits than"
				" the width of a field");
	/* Offset/level checks only make sense for real header fields. */
	if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
	    action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
		/* Offset must fit the field and be 32-bit aligned. */
		if ((action_modify_field->dst.offset +
		     action_modify_field->width > dst_width) ||
		    (action_modify_field->dst.offset % 32))
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"destination offset is too big"
					" or not aligned to 4 bytes");
		/* Non-zero level selects inner headers - only TAG allows it. */
		if (action_modify_field->dst.level &&
		    action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"inner header fields modification"
					" is not supported");
	}
	if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
	    action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
		/* Field-to-field copy is not available on the root table. */
		if (!attr->transfer && !attr->group)
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"modify field action is not"
					" supported for group 0");
		if ((action_modify_field->src.offset +
		     action_modify_field->width > src_width) ||
		    (action_modify_field->src.offset % 32))
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"source offset is too big"
					" or not aligned to 4 bytes");
		if (action_modify_field->src.level &&
		    action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"inner header fields modification"
					" is not supported");
	}
	/* Copying a field onto itself would be a no-op - reject it. */
	if ((action_modify_field->dst.field ==
	     action_modify_field->src.field) &&
	    (action_modify_field->dst.level ==
	     action_modify_field->src.level))
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"source and destination fields"
				" cannot be the same");
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
	    action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"immediate value or a pointer to it"
				" cannot be used as a destination");
	/* The following field kinds are not supported by the HW/PMD. */
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_START)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modifications of an arbitrary"
				" place in a packet is not supported");
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modifications of the 802.1Q Tag"
				" Identifier is not supported");
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modifications of the VXLAN Network"
				" Identifier is not supported");
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"modifications of the GENEVE Network"
				" Identifier is not supported");
	/* MARK/META need the extended metadata register support. */
	if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
	    action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
	    action_modify_field->src.field == RTE_FLOW_FIELD_META) {
		if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
		    !mlx5_flow_ext_mreg_supported(dev))
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"cannot modify mark or metadata without"
					" extended metadata register support");
	}
	if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"add and sub operations"
				" are not supported");
	/* One 32-bit modify-header command per 32 bits, rounded up. */
	return (action_modify_field->width / 32) +
	       !!(action_modify_field->width % 32);
}
4933
4934 /**
4935  * Validate jump action.
4936  *
4937  * @param[in] action
4938  *   Pointer to the jump action.
4939  * @param[in] action_flags
4940  *   Holds the actions detected until now.
4941  * @param[in] attributes
4942  *   Pointer to flow attributes
4943  * @param[in] external
4944  *   Action belongs to flow rule created by request external to PMD.
4945  * @param[out] error
4946  *   Pointer to error structure.
4947  *
4948  * @return
4949  *   0 on success, a negative errno value otherwise and rte_errno is set.
4950  */
4951 static int
4952 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4953                              const struct mlx5_flow_tunnel *tunnel,
4954                              const struct rte_flow_action *action,
4955                              uint64_t action_flags,
4956                              const struct rte_flow_attr *attributes,
4957                              bool external, struct rte_flow_error *error)
4958 {
4959         uint32_t target_group, table;
4960         int ret = 0;
4961         struct flow_grp_info grp_info = {
4962                 .external = !!external,
4963                 .transfer = !!attributes->transfer,
4964                 .fdb_def_rule = 1,
4965                 .std_tbl_fix = 0
4966         };
4967         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4968                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4969                 return rte_flow_error_set(error, EINVAL,
4970                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4971                                           "can't have 2 fate actions in"
4972                                           " same flow");
4973         if (!action->conf)
4974                 return rte_flow_error_set(error, EINVAL,
4975                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4976                                           NULL, "action configuration not set");
4977         target_group =
4978                 ((const struct rte_flow_action_jump *)action->conf)->group;
4979         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4980                                        &grp_info, error);
4981         if (ret)
4982                 return ret;
4983         if (attributes->group == target_group &&
4984             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4985                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4986                 return rte_flow_error_set(error, EINVAL,
4987                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4988                                           "target group must be other than"
4989                                           " the current flow group");
4990         return 0;
4991 }
4992
4993 /*
4994  * Validate the port_id action.
4995  *
4996  * @param[in] dev
4997  *   Pointer to rte_eth_dev structure.
4998  * @param[in] action_flags
4999  *   Bit-fields that holds the actions detected until now.
5000  * @param[in] action
5001  *   Port_id RTE action structure.
5002  * @param[in] attr
5003  *   Attributes of flow that includes this action.
5004  * @param[out] error
5005  *   Pointer to error structure.
5006  *
5007  * @return
5008  *   0 on success, a negative errno value otherwise and rte_errno is set.
5009  */
5010 static int
5011 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5012                                 uint64_t action_flags,
5013                                 const struct rte_flow_action *action,
5014                                 const struct rte_flow_attr *attr,
5015                                 struct rte_flow_error *error)
5016 {
5017         const struct rte_flow_action_port_id *port_id;
5018         struct mlx5_priv *act_priv;
5019         struct mlx5_priv *dev_priv;
5020         uint16_t port;
5021
5022         if (!attr->transfer)
5023                 return rte_flow_error_set(error, ENOTSUP,
5024                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5025                                           NULL,
5026                                           "port id action is valid in transfer"
5027                                           " mode only");
5028         if (!action || !action->conf)
5029                 return rte_flow_error_set(error, ENOTSUP,
5030                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5031                                           NULL,
5032                                           "port id action parameters must be"
5033                                           " specified");
5034         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5035                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5036                 return rte_flow_error_set(error, EINVAL,
5037                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5038                                           "can have only one fate actions in"
5039                                           " a flow");
5040         dev_priv = mlx5_dev_to_eswitch_info(dev);
5041         if (!dev_priv)
5042                 return rte_flow_error_set(error, rte_errno,
5043                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5044                                           NULL,
5045                                           "failed to obtain E-Switch info");
5046         port_id = action->conf;
5047         port = port_id->original ? dev->data->port_id : port_id->id;
5048         act_priv = mlx5_port_to_eswitch_info(port, false);
5049         if (!act_priv)
5050                 return rte_flow_error_set
5051                                 (error, rte_errno,
5052                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
5053                                  "failed to obtain E-Switch port id for port");
5054         if (act_priv->domain_id != dev_priv->domain_id)
5055                 return rte_flow_error_set
5056                                 (error, EINVAL,
5057                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5058                                  "port does not belong to"
5059                                  " E-Switch being configured");
5060         return 0;
5061 }
5062
5063 /**
5064  * Get the maximum number of modify header actions.
5065  *
5066  * @param dev
5067  *   Pointer to rte_eth_dev structure.
5068  * @param root
5069  *   Whether action is on root table.
5070  *
5071  * @return
5072  *   Max number of modify header actions device can support.
5073  */
5074 static inline unsigned int
5075 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5076                               bool root)
5077 {
5078         /*
5079          * There's no way to directly query the max capacity from FW.
5080          * The maximal value on root table should be assumed to be supported.
5081          */
5082         if (!root)
5083                 return MLX5_MAX_MODIFY_NUM;
5084         else
5085                 return MLX5_ROOT_TBL_MODIFY_NUM;
5086 }
5087
/**
 * Validate the meter action.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] action
 *   Pointer to the meter action.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] port_id_item
 *   Pointer to item indicating port id.
 * @param[out] def_policy
 *   Set to true if the meter uses the default policy, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
				uint64_t action_flags,
				const struct rte_flow_action *action,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item *port_id_item,
				bool *def_policy,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_meter *am = action->conf;
	struct mlx5_flow_meter_info *fm;
	struct mlx5_flow_meter_policy *mtr_policy;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;

	if (!am)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter action conf is NULL");

	/* Only a single meter per flow; meter after meter is rejected. */
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter chaining not support");
	if (action_flags & MLX5_FLOW_ACTION_JUMP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "meter with jump not support");
	if (!priv->mtr_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "meter action not supported");
	fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
	if (!fm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Meter not found");
	/* aso meter can always be shared by different domains */
	if (fm->ref_cnt && !priv->sh->meter_aso_en &&
	    !(fm->transfer == attr->transfer ||
	      (!fm->ingress && !attr->ingress && attr->egress) ||
	      (!fm->egress && !attr->egress && attr->ingress)))
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			"Flow attributes domain are either invalid "
			"or have a domain conflict with current "
			"meter attributes");
	if (fm->def_policy) {
		/* Default policy must exist for each requested domain. */
		if (!((attr->transfer &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
			(attr->egress &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
			(attr->ingress &&
			mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes domain "
					  "have a conflict with current "
					  "meter domain attributes");
		*def_policy = true;
	} else {
		mtr_policy = mlx5_flow_meter_policy_find(dev,
						fm->policy_id, NULL);
		if (!mtr_policy)
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Invalid policy id for meter ");
		/* The policy must cover every domain the flow targets. */
		if (!((attr->transfer && mtr_policy->transfer) ||
			(attr->egress && mtr_policy->egress) ||
			(attr->ingress && mtr_policy->ingress)))
			return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Flow attributes domain "
					  "have a conflict with current "
					  "meter domain attributes");
		if (attr->transfer && mtr_policy->dev) {
			/**
			 * When policy has fate action of port_id,
			 * the flow should have the same src port as policy.
			 */
			struct mlx5_priv *policy_port_priv =
					mtr_policy->dev->data->dev_private;
			int32_t flow_src_port = priv->representor_id;

			if (port_id_item) {
				/* Explicit PORT_ID item overrides own port. */
				const struct rte_flow_item_port_id *spec =
							port_id_item->spec;
				struct mlx5_priv *port_priv =
					mlx5_port_to_eswitch_info(spec->id,
								  false);
				if (!port_priv)
					return rte_flow_error_set(error,
						rte_errno,
						RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
						spec,
						"Failed to get port info.");
				flow_src_port = port_priv->representor_id;
			}
			if (flow_src_port != policy_port_priv->representor_id)
				return rte_flow_error_set(error,
						rte_errno,
						RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
						NULL,
						"Flow and meter policy "
						"have different src port.");
		}
		*def_policy = false;
	}
	return 0;
}
5218
5219 /**
5220  * Validate the age action.
5221  *
5222  * @param[in] action_flags
5223  *   Holds the actions detected until now.
5224  * @param[in] action
5225  *   Pointer to the age action.
5226  * @param[in] dev
5227  *   Pointer to the Ethernet device structure.
5228  * @param[out] error
5229  *   Pointer to error structure.
5230  *
5231  * @return
5232  *   0 on success, a negative errno value otherwise and rte_errno is set.
5233  */
5234 static int
5235 flow_dv_validate_action_age(uint64_t action_flags,
5236                             const struct rte_flow_action *action,
5237                             struct rte_eth_dev *dev,
5238                             struct rte_flow_error *error)
5239 {
5240         struct mlx5_priv *priv = dev->data->dev_private;
5241         const struct rte_flow_action_age *age = action->conf;
5242
5243         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5244             !priv->sh->aso_age_mng))
5245                 return rte_flow_error_set(error, ENOTSUP,
5246                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5247                                           NULL,
5248                                           "age action not supported");
5249         if (!(action->conf))
5250                 return rte_flow_error_set(error, EINVAL,
5251                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5252                                           "configuration cannot be null");
5253         if (!(age->timeout))
5254                 return rte_flow_error_set(error, EINVAL,
5255                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5256                                           "invalid timeout value 0");
5257         if (action_flags & MLX5_FLOW_ACTION_AGE)
5258                 return rte_flow_error_set(error, EINVAL,
5259                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5260                                           "duplicate age actions set");
5261         return 0;
5262 }
5263
5264 /**
5265  * Validate the modify-header IPv4 DSCP actions.
5266  *
5267  * @param[in] action_flags
5268  *   Holds the actions detected until now.
5269  * @param[in] action
5270  *   Pointer to the modify action.
5271  * @param[in] item_flags
5272  *   Holds the items detected.
5273  * @param[out] error
5274  *   Pointer to error structure.
5275  *
5276  * @return
5277  *   0 on success, a negative errno value otherwise and rte_errno is set.
5278  */
5279 static int
5280 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5281                                          const struct rte_flow_action *action,
5282                                          const uint64_t item_flags,
5283                                          struct rte_flow_error *error)
5284 {
5285         int ret = 0;
5286
5287         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5288         if (!ret) {
5289                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5290                         return rte_flow_error_set(error, EINVAL,
5291                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5292                                                   NULL,
5293                                                   "no ipv4 item in pattern");
5294         }
5295         return ret;
5296 }
5297
5298 /**
5299  * Validate the modify-header IPv6 DSCP actions.
5300  *
5301  * @param[in] action_flags
5302  *   Holds the actions detected until now.
5303  * @param[in] action
5304  *   Pointer to the modify action.
5305  * @param[in] item_flags
5306  *   Holds the items detected.
5307  * @param[out] error
5308  *   Pointer to error structure.
5309  *
5310  * @return
5311  *   0 on success, a negative errno value otherwise and rte_errno is set.
5312  */
5313 static int
5314 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5315                                          const struct rte_flow_action *action,
5316                                          const uint64_t item_flags,
5317                                          struct rte_flow_error *error)
5318 {
5319         int ret = 0;
5320
5321         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5322         if (!ret) {
5323                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5324                         return rte_flow_error_set(error, EINVAL,
5325                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5326                                                   NULL,
5327                                                   "no ipv6 item in pattern");
5328         }
5329         return ret;
5330 }
5331
5332 int
5333 flow_dv_modify_match_cb(void *tool_ctx __rte_unused,
5334                         struct mlx5_list_entry *entry, void *cb_ctx)
5335 {
5336         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5337         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5338         struct mlx5_flow_dv_modify_hdr_resource *resource =
5339                                   container_of(entry, typeof(*resource), entry);
5340         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5341
5342         key_len += ref->actions_num * sizeof(ref->actions[0]);
5343         return ref->actions_num != resource->actions_num ||
5344                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5345 }
5346
/**
 * Get, or lazily create, the indexed pool for modify-header resources
 * that carry (index + 1) modification commands.
 *
 * Creation is lock-free: a freshly created pool is published with a CAS;
 * the loser of a concurrent race destroys its own pool and adopts the
 * winner's one.
 *
 * @param[in] sh
 *   Pointer to the shared device context.
 * @param[in] index
 *   Pool index, i.e. number of modification commands minus one.
 *
 * @return
 *   Pointer to the indexed pool, NULL on pool creation failure.
 */
static struct mlx5_indexed_pool *
flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
{
	struct mlx5_indexed_pool *ipool = __atomic_load_n
				     (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);

	if (!ipool) {
		struct mlx5_indexed_pool *expected = NULL;
		/* Entry size = fixed resource part + (index + 1) commands. */
		struct mlx5_indexed_pool_config cfg =
		    (struct mlx5_indexed_pool_config) {
		       .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
								   (index + 1) *
					   sizeof(struct mlx5_modification_cmd),
		       .trunk_size = 64,
		       .grow_trunk = 3,
		       .grow_shift = 2,
		       .need_lock = 1,
		       .release_mem_en = !!sh->reclaim_mode,
		       .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
		       .malloc = mlx5_malloc,
		       .free = mlx5_free,
		       .type = "mlx5_modify_action_resource",
		};

		/* Round the entry size up to pointer alignment. */
		cfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));
		ipool = mlx5_ipool_create(&cfg);
		if (!ipool)
			return NULL;
		/*
		 * Publish the new pool. On CAS failure another thread
		 * installed a pool first: destroy ours and use theirs.
		 */
		if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
						 &expected, ipool, false,
						 __ATOMIC_SEQ_CST,
						 __ATOMIC_SEQ_CST)) {
			mlx5_ipool_destroy(ipool);
			ipool = __atomic_load_n(&sh->mdh_ipools[index],
						__ATOMIC_SEQ_CST);
		}
	}
	return ipool;
}
5386
/**
 * Create callback for the modify-header resource list.
 *
 * Allocates a resource from the size-matched indexed pool, copies the
 * lookup key plus the modification commands from the reference, and
 * creates the hardware modify-header action on the matching domain.
 *
 * @return
 *   Pointer to the new list entry, NULL on failure (ctx->error is set).
 */
struct mlx5_list_entry *
flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5dv_dr_domain *ns;
	struct mlx5_flow_dv_modify_hdr_resource *entry;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	/* Pools are indexed by the number of commands minus one. */
	struct mlx5_indexed_pool *ipool = flow_dv_modify_ipool_get(sh,
							  ref->actions_num - 1);
	int ret;
	uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
	/* Key covers everything from "ft_type" to the end of the struct. */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
	uint32_t idx;

	if (unlikely(!ipool)) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot allocate modify ipool");
		return NULL;
	}
	entry = mlx5_ipool_zmalloc(ipool, &idx);
	if (!entry) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		return NULL;
	}
	/* Copy key and trailing commands in one shot. */
	rte_memcpy(&entry->ft_type,
		   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
		   key_len + data_len);
	/* Pick the DR domain matching the flow table type. */
	if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_domain;
	else
		ns = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_action_modify_header
					(sh->ctx, ns, entry,
					 data_len, &entry->action);
	if (ret) {
		/* Return the entry to its pool on HW action failure. */
		mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create modification action");
		return NULL;
	}
	entry->idx = idx;
	return &entry->entry;
}
5437
5438 struct mlx5_list_entry *
5439 flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5440                         void *cb_ctx)
5441 {
5442         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5443         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5444         struct mlx5_flow_dv_modify_hdr_resource *entry;
5445         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5446         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5447         uint32_t idx;
5448
5449         entry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],
5450                                   &idx);
5451         if (!entry) {
5452                 rte_flow_error_set(ctx->error, ENOMEM,
5453                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5454                                    "cannot allocate resource memory");
5455                 return NULL;
5456         }
5457         memcpy(entry, oentry, sizeof(*entry) + data_len);
5458         entry->idx = idx;
5459         return &entry->entry;
5460 }
5461
5462 void
5463 flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5464 {
5465         struct mlx5_dev_ctx_shared *sh = tool_ctx;
5466         struct mlx5_flow_dv_modify_hdr_resource *res =
5467                 container_of(entry, typeof(*res), entry);
5468
5469         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
5470 }
5471
5472 /**
5473  * Validate the sample action.
5474  *
5475  * @param[in, out] action_flags
5476  *   Holds the actions detected until now.
5477  * @param[in] action
5478  *   Pointer to the sample action.
5479  * @param[in] dev
5480  *   Pointer to the Ethernet device structure.
5481  * @param[in] attr
5482  *   Attributes of flow that includes this action.
5483  * @param[in] item_flags
5484  *   Holds the items detected.
5485  * @param[in] rss
5486  *   Pointer to the RSS action.
5487  * @param[out] sample_rss
5488  *   Pointer to the RSS action in sample action list.
5489  * @param[out] count
5490  *   Pointer to the COUNT action in sample action list.
5491  * @param[out] fdb_mirror_limit
5492  *   Pointer to the FDB mirror limitation flag.
5493  * @param[out] error
5494  *   Pointer to error structure.
5495  *
5496  * @return
5497  *   0 on success, a negative errno value otherwise and rte_errno is set.
5498  */
5499 static int
5500 flow_dv_validate_action_sample(uint64_t *action_flags,
5501                                const struct rte_flow_action *action,
5502                                struct rte_eth_dev *dev,
5503                                const struct rte_flow_attr *attr,
5504                                uint64_t item_flags,
5505                                const struct rte_flow_action_rss *rss,
5506                                const struct rte_flow_action_rss **sample_rss,
5507                                const struct rte_flow_action_count **count,
5508                                int *fdb_mirror_limit,
5509                                struct rte_flow_error *error)
5510 {
5511         struct mlx5_priv *priv = dev->data->dev_private;
5512         struct mlx5_dev_config *dev_conf = &priv->config;
5513         const struct rte_flow_action_sample *sample = action->conf;
5514         const struct rte_flow_action *act;
5515         uint64_t sub_action_flags = 0;
5516         uint16_t queue_index = 0xFFFF;
5517         int actions_n = 0;
5518         int ret;
5519
5520         if (!sample)
5521                 return rte_flow_error_set(error, EINVAL,
5522                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5523                                           "configuration cannot be NULL");
5524         if (sample->ratio == 0)
5525                 return rte_flow_error_set(error, EINVAL,
5526                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5527                                           "ratio value starts from 1");
5528         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5529                 return rte_flow_error_set(error, ENOTSUP,
5530                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5531                                           NULL,
5532                                           "sample action not supported");
5533         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5534                 return rte_flow_error_set(error, EINVAL,
5535                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5536                                           "Multiple sample actions not "
5537                                           "supported");
5538         if (*action_flags & MLX5_FLOW_ACTION_METER)
5539                 return rte_flow_error_set(error, EINVAL,
5540                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5541                                           "wrong action order, meter should "
5542                                           "be after sample action");
5543         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5544                 return rte_flow_error_set(error, EINVAL,
5545                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5546                                           "wrong action order, jump should "
5547                                           "be after sample action");
5548         act = sample->actions;
5549         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5550                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5551                         return rte_flow_error_set(error, ENOTSUP,
5552                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5553                                                   act, "too many actions");
5554                 switch (act->type) {
5555                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5556                         ret = mlx5_flow_validate_action_queue(act,
5557                                                               sub_action_flags,
5558                                                               dev,
5559                                                               attr, error);
5560                         if (ret < 0)
5561                                 return ret;
5562                         queue_index = ((const struct rte_flow_action_queue *)
5563                                                         (act->conf))->index;
5564                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5565                         ++actions_n;
5566                         break;
5567                 case RTE_FLOW_ACTION_TYPE_RSS:
5568                         *sample_rss = act->conf;
5569                         ret = mlx5_flow_validate_action_rss(act,
5570                                                             sub_action_flags,
5571                                                             dev, attr,
5572                                                             item_flags,
5573                                                             error);
5574                         if (ret < 0)
5575                                 return ret;
5576                         if (rss && *sample_rss &&
5577                             ((*sample_rss)->level != rss->level ||
5578                             (*sample_rss)->types != rss->types))
5579                                 return rte_flow_error_set(error, ENOTSUP,
5580                                         RTE_FLOW_ERROR_TYPE_ACTION,
5581                                         NULL,
5582                                         "Can't use the different RSS types "
5583                                         "or level in the same flow");
5584                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5585                                 queue_index = (*sample_rss)->queue[0];
5586                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5587                         ++actions_n;
5588                         break;
5589                 case RTE_FLOW_ACTION_TYPE_MARK:
5590                         ret = flow_dv_validate_action_mark(dev, act,
5591                                                            sub_action_flags,
5592                                                            attr, error);
5593                         if (ret < 0)
5594                                 return ret;
5595                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5596                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5597                                                 MLX5_FLOW_ACTION_MARK_EXT;
5598                         else
5599                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5600                         ++actions_n;
5601                         break;
5602                 case RTE_FLOW_ACTION_TYPE_COUNT:
5603                         ret = flow_dv_validate_action_count
5604                                 (dev, is_shared_action_count(act),
5605                                  *action_flags | sub_action_flags,
5606                                  error);
5607                         if (ret < 0)
5608                                 return ret;
5609                         *count = act->conf;
5610                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5611                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5612                         ++actions_n;
5613                         break;
5614                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5615                         ret = flow_dv_validate_action_port_id(dev,
5616                                                               sub_action_flags,
5617                                                               act,
5618                                                               attr,
5619                                                               error);
5620                         if (ret)
5621                                 return ret;
5622                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5623                         ++actions_n;
5624                         break;
5625                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5626                         ret = flow_dv_validate_action_raw_encap_decap
5627                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5628                                  &actions_n, action, item_flags, error);
5629                         if (ret < 0)
5630                                 return ret;
5631                         ++actions_n;
5632                         break;
5633                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5634                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5635                         ret = flow_dv_validate_action_l2_encap(dev,
5636                                                                sub_action_flags,
5637                                                                act, attr,
5638                                                                error);
5639                         if (ret < 0)
5640                                 return ret;
5641                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5642                         ++actions_n;
5643                         break;
5644                 default:
5645                         return rte_flow_error_set(error, ENOTSUP,
5646                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5647                                                   NULL,
5648                                                   "Doesn't support optional "
5649                                                   "action");
5650                 }
5651         }
5652         if (attr->ingress && !attr->transfer) {
5653                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5654                                           MLX5_FLOW_ACTION_RSS)))
5655                         return rte_flow_error_set(error, EINVAL,
5656                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5657                                                   NULL,
5658                                                   "Ingress must has a dest "
5659                                                   "QUEUE for Sample");
5660         } else if (attr->egress && !attr->transfer) {
5661                 return rte_flow_error_set(error, ENOTSUP,
5662                                           RTE_FLOW_ERROR_TYPE_ACTION,
5663                                           NULL,
5664                                           "Sample Only support Ingress "
5665                                           "or E-Switch");
5666         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5667                 MLX5_ASSERT(attr->transfer);
5668                 if (sample->ratio > 1)
5669                         return rte_flow_error_set(error, ENOTSUP,
5670                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5671                                                   NULL,
5672                                                   "E-Switch doesn't support "
5673                                                   "any optional action "
5674                                                   "for sampling");
5675                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5676                         return rte_flow_error_set(error, ENOTSUP,
5677                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5678                                                   NULL,
5679                                                   "unsupported action QUEUE");
5680                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5681                         return rte_flow_error_set(error, ENOTSUP,
5682                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5683                                                   NULL,
5684                                                   "unsupported action QUEUE");
5685                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5686                         return rte_flow_error_set(error, EINVAL,
5687                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5688                                                   NULL,
5689                                                   "E-Switch must has a dest "
5690                                                   "port for mirroring");
5691                 if (!priv->config.hca_attr.reg_c_preserve &&
5692                      priv->representor_id != UINT16_MAX)
5693                         *fdb_mirror_limit = 1;
5694         }
5695         /* Continue validation for Xcap actions.*/
5696         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5697             (queue_index == 0xFFFF ||
5698              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5699                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5700                      MLX5_FLOW_XCAP_ACTIONS)
5701                         return rte_flow_error_set(error, ENOTSUP,
5702                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5703                                                   NULL, "encap and decap "
5704                                                   "combination aren't "
5705                                                   "supported");
5706                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5707                                                         MLX5_FLOW_ACTION_ENCAP))
5708                         return rte_flow_error_set(error, ENOTSUP,
5709                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5710                                                   NULL, "encap is not supported"
5711                                                   " for ingress traffic");
5712         }
5713         return 0;
5714 }
5715
5716 /**
5717  * Find existing modify-header resource or create and register a new one.
5718  *
5719  * @param dev[in, out]
5720  *   Pointer to rte_eth_dev structure.
5721  * @param[in, out] resource
5722  *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
5724  *   Pointer to the dev_flow.
5725  * @param[out] error
5726  *   pointer to error structure.
5727  *
5728  * @return
5729  *   0 on success otherwise -errno and errno is set.
5730  */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	/*
	 * The lookup key spans from ->ft_type to the end of the
	 * variable-length actions array, so two resources with identical
	 * bytes over that span are considered the same modify-header.
	 */
	uint32_t key_len = sizeof(*resource) -
			   offsetof(typeof(*resource), ft_type) +
			   resource->actions_num * sizeof(resource->actions[0]);
	struct mlx5_list_entry *entry;
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};
	uint64_t key64;

	/* Root tables (group 0) support fewer modify-header commands. */
	resource->root = !dev_flow->dv.group;
	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
								resource->root))
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many modify header items");
	/* Checksum of the key bytes is used as the hash-list key. */
	key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
	/* Registration either finds an existing entry or creates one. */
	entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
	if (!entry)
		return -rte_errno;
	/* Point the flow handle at the (possibly shared) resource. */
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->handle->dvh.modify_hdr = resource;
	return 0;
}
5764
5765 /**
5766  * Get DV flow counter by index.
5767  *
5768  * @param[in] dev
5769  *   Pointer to the Ethernet device structure.
5770  * @param[in] idx
5771  *   mlx5 flow counter index in the container.
5772  * @param[out] ppool
5773  *   mlx5 flow counter pool in the container.
5774  *
5775  * @return
5776  *   Pointer to the counter, NULL otherwise.
5777  */
5778 static struct mlx5_flow_counter *
5779 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5780                            uint32_t idx,
5781                            struct mlx5_flow_counter_pool **ppool)
5782 {
5783         struct mlx5_priv *priv = dev->data->dev_private;
5784         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5785         struct mlx5_flow_counter_pool *pool;
5786
5787         /* Decrease to original index and clear shared bit. */
5788         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5789         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5790         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5791         MLX5_ASSERT(pool);
5792         if (ppool)
5793                 *ppool = pool;
5794         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5795 }
5796
5797 /**
5798  * Check the devx counter belongs to the pool.
5799  *
5800  * @param[in] pool
5801  *   Pointer to the counter pool.
5802  * @param[in] id
5803  *   The counter devx ID.
5804  *
5805  * @return
5806  *   True if counter belongs to the pool, false otherwise.
5807  */
5808 static bool
5809 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5810 {
5811         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5812                    MLX5_COUNTERS_PER_POOL;
5813
5814         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5815                 return true;
5816         return false;
5817 }
5818
5819 /**
5820  * Get a pool by devx counter ID.
5821  *
5822  * @param[in] cmng
5823  *   Pointer to the counter management.
5824  * @param[in] id
5825  *   The counter devx ID.
5826  *
5827  * @return
 *   The counter pool pointer if it exists, NULL otherwise.
5829  */
5830 static struct mlx5_flow_counter_pool *
5831 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5832 {
5833         uint32_t i;
5834         struct mlx5_flow_counter_pool *pool = NULL;
5835
5836         rte_spinlock_lock(&cmng->pool_update_sl);
5837         /* Check last used pool. */
5838         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5839             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5840                 pool = cmng->pools[cmng->last_pool_idx];
5841                 goto out;
5842         }
5843         /* ID out of range means no suitable pool in the container. */
5844         if (id > cmng->max_id || id < cmng->min_id)
5845                 goto out;
5846         /*
5847          * Find the pool from the end of the container, since mostly counter
5848          * ID is sequence increasing, and the last pool should be the needed
5849          * one.
5850          */
5851         i = cmng->n_valid;
5852         while (i--) {
5853                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5854
5855                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5856                         pool = pool_tmp;
5857                         break;
5858                 }
5859         }
5860 out:
5861         rte_spinlock_unlock(&cmng->pool_update_sl);
5862         return pool;
5863 }
5864
5865 /**
5866  * Resize a counter container.
5867  *
5868  * @param[in] dev
5869  *   Pointer to the Ethernet device structure.
5870  *
5871  * @return
5872  *   0 on success, otherwise negative errno value and rte_errno is set.
5873  */
5874 static int
5875 flow_dv_container_resize(struct rte_eth_dev *dev)
5876 {
5877         struct mlx5_priv *priv = dev->data->dev_private;
5878         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5879         void *old_pools = cmng->pools;
5880         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5881         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5882         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5883
5884         if (!pools) {
5885                 rte_errno = ENOMEM;
5886                 return -ENOMEM;
5887         }
5888         if (old_pools)
5889                 memcpy(pools, old_pools, cmng->n *
5890                                        sizeof(struct mlx5_flow_counter_pool *));
5891         cmng->n = resize;
5892         cmng->pools = pools;
5893         if (old_pools)
5894                 mlx5_free(old_pools);
5895         return 0;
5896 }
5897
5898 /**
5899  * Query a devx flow counter.
5900  *
5901  * @param[in] dev
5902  *   Pointer to the Ethernet device structure.
5903  * @param[in] counter
5904  *   Index to the flow counter.
5905  * @param[out] pkts
5906  *   The statistics value of packets.
5907  * @param[out] bytes
5908  *   The statistics value of bytes.
5909  *
5910  * @return
5911  *   0 on success, otherwise a negative errno value and rte_errno is set.
5912  */
5913 static inline int
5914 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5915                      uint64_t *bytes)
5916 {
5917         struct mlx5_priv *priv = dev->data->dev_private;
5918         struct mlx5_flow_counter_pool *pool = NULL;
5919         struct mlx5_flow_counter *cnt;
5920         int offset;
5921
5922         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5923         MLX5_ASSERT(pool);
5924         if (priv->sh->cmng.counter_fallback)
5925                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5926                                         0, pkts, bytes, 0, NULL, NULL, 0);
5927         rte_spinlock_lock(&pool->sl);
5928         if (!pool->raw) {
5929                 *pkts = 0;
5930                 *bytes = 0;
5931         } else {
5932                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5933                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5934                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5935         }
5936         rte_spinlock_unlock(&pool->sl);
5937         return 0;
5938 }
5939
5940 /**
5941  * Create and initialize a new counter pool.
5942  *
5943  * @param[in] dev
5944  *   Pointer to the Ethernet device structure.
5945  * @param[out] dcs
5946  *   The devX counter handle.
5947  * @param[in] age
5948  *   Whether the pool is for counter that was allocated for aging.
 * @param[in/out] cont_cur
 *   Pointer to the container pointer; it will be updated on pool resize.
5951  *
5952  * @return
5953  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
5954  */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/* Counter entries are laid out right after the pool header. */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	/* Aging pools carry an extra per-counter age record. */
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/* Grow the container first if every slot is already occupied. */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		/*
		 * Track the devx ID range covered by all pools so the
		 * lookup by ID can quickly reject out-of-range IDs.
		 */
		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
6002
6003 /**
6004  * Prepare a new counter and/or a new counter pool.
6005  *
6006  * @param[in] dev
6007  *   Pointer to the Ethernet device structure.
6008  * @param[out] cnt_free
6009  *   Where to put the pointer of a new counter.
6010  * @param[in] age
6011  *   Whether the pool is for counter that was allocated for aging.
6012  *
6013  * @return
6014  *   The counter pool pointer and @p cnt_free is set on success,
6015  *   NULL otherwise and rte_errno is set.
6016  */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
		if (!dcs)
			return NULL;
		/* Reuse the pool covering this devx ID if one exists. */
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				/* Release the devx object on failure. */
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		/* The counter slot is determined by the devx ID. */
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* Bulk allocation: 0x4 requests a full pool of counters at once. */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/*
	 * Build the free list locally first, then splice it into the
	 * shared free list under the lock. Counter 0 is handed back to
	 * the caller directly and kept off the list.
	 */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
6076
6077 /**
6078  * Allocate a flow counter.
6079  *
6080  * @param[in] dev
6081  *   Pointer to the Ethernet device structure.
6082  * @param[in] age
6083  *   Whether the counter was allocated for aging.
6084  *
6085  * @return
6086  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6087  */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	/* Flow counters require DevX support. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get free counters from container. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list empty: allocate a fresh pool (and counter). */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	if (fallback)
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			/* Bulk mode: one devx object, per-counter offset. */
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Fallback mode: one devx object per counter. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	/*
	 * When the count action isn't shared (by ID), shared_info field is
	 * used for indirect action API's refcnt.
	 * When the counter action is not shared neither by ID nor by indirect
	 * action API, shared info must be 1.
	 */
	cnt_free->shared_info.refcnt = 1;
	return cnt_idx;
err:
	/* Return the counter to the free list so it can be reused. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
6163
6164 /**
6165  * Allocate a shared flow counter.
6166  *
6167  * @param[in] ctx
6168  *   Pointer to the shared counter configuration.
6169  * @param[in] data
6170  *   Pointer to save the allocated counter index.
6171  *
6172  * @return
6173  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6174  */
6175
6176 static int32_t
6177 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6178 {
6179         struct mlx5_shared_counter_conf *conf = ctx;
6180         struct rte_eth_dev *dev = conf->dev;
6181         struct mlx5_flow_counter *cnt;
6182
6183         data->dword = flow_dv_counter_alloc(dev, 0);
6184         data->dword |= MLX5_CNT_SHARED_OFFSET;
6185         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6186         cnt->shared_info.id = conf->id;
6187         return 0;
6188 }
6189
6190 /**
6191  * Get a shared flow counter.
6192  *
6193  * @param[in] dev
6194  *   Pointer to the Ethernet device structure.
6195  * @param[in] id
6196  *   Counter identifier.
6197  *
6198  * @return
6199  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6200  */
6201 static uint32_t
6202 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6203 {
6204         struct mlx5_priv *priv = dev->data->dev_private;
6205         struct mlx5_shared_counter_conf conf = {
6206                 .dev = dev,
6207                 .id = id,
6208         };
6209         union mlx5_l3t_data data = {
6210                 .dword = 0,
6211         };
6212
6213         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6214                                flow_dv_counter_alloc_shared_cb, &conf);
6215         return data.dword;
6216 }
6217
6218 /**
6219  * Get age param from counter index.
6220  *
6221  * @param[in] dev
6222  *   Pointer to the Ethernet device structure.
6223  * @param[in] counter
6224  *   Index to the counter handler.
6225  *
6226  * @return
6227  *   The aging parameter specified for the counter index.
6228  */
6229 static struct mlx5_age_param*
6230 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6231                                 uint32_t counter)
6232 {
6233         struct mlx5_flow_counter *cnt;
6234         struct mlx5_flow_counter_pool *pool = NULL;
6235
6236         flow_dv_counter_get_by_idx(dev, counter, &pool);
6237         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6238         cnt = MLX5_POOL_GET_CNT(pool, counter);
6239         return MLX5_CNT_TO_AGE(cnt);
6240 }
6241
/**
 * Remove a flow counter from aged counter list.
 *
 * Atomically marks the age state AGE_FREE. If the state was no longer
 * AGE_CANDIDATE (the counter already aged out), the counter also has to be
 * unlinked from the port's aged-counters list under its lock.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 * @param[in] cnt
 *   Pointer to the counter handler.
 */
static void
flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
				uint32_t counter, struct mlx5_flow_counter *cnt)
{
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t expected = AGE_CANDIDATE;

	age_info = GET_PORT_AGE_INFO(priv);
	age_param = flow_dv_counter_idx_get_age(dev, counter);
	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
					 AGE_FREE, false, __ATOMIC_RELAXED,
					 __ATOMIC_RELAXED)) {
		/**
		 * CAS failed: the counter left the candidate state, so it
		 * sits on the aged list. We need the lock even if it is an
		 * age timeout, since the counter may still be in process.
		 */
		rte_spinlock_lock(&age_info->aged_sl);
		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
		rte_spinlock_unlock(&age_info->aged_sl);
		/* Publish the final free state after unlinking. */
		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
6276
/**
 * Release a flow counter.
 *
 * The counter is only returned to a free list once it is no longer
 * referenced: legacy shared counters (by ID) are refcounted via the l3t
 * table, indirect-action counters via an atomic reference counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type;

	/* Index 0 means "no counter" - nothing to release. */
	if (!counter)
		return;
	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	if (pool->is_aged) {
		flow_dv_counter_remove_from_age(dev, counter, cnt);
	} else {
		/*
		 * If the counter action is shared by ID, the l3t_clear_entry
		 * function reduces its references counter. If after the
		 * reduction the action is still referenced, the function
		 * returns here and does not release it.
		 */
		if (IS_LEGACY_SHARED_CNT(counter) &&
		    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
					 cnt->shared_info.id))
			return;
		/*
		 * If the counter action is shared by indirect action API,
		 * the atomic function reduces its references counter.
		 * If after the reduction the action is still referenced, the
		 * function returns here and does not release it.
		 * When the counter action is not shared neither by ID nor by
		 * indirect action API, shared info is 1 before the reduction,
		 * so this condition is failed and function doesn't return here.
		 */
		if (!IS_LEGACY_SHARED_CNT(counter) &&
		    __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
				       __ATOMIC_RELAXED))
			return;
	}
	cnt->pool = pool;
	/*
	 * Put the counter back to list to be updated in none fallback mode.
	 * Currently, we are using two list alternately, while one is in query,
	 * add the freed counter to the other list based on the pool query_gen
	 * value. After query finishes, add counter the list to the global
	 * container counter list. The list changes while query starts. In
	 * this case, lock will not be needed as query callback and release
	 * function both operate with the different list.
	 */
	if (!priv->sh->cmng.counter_fallback) {
		rte_spinlock_lock(&pool->csl);
		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
		rte_spinlock_unlock(&pool->csl);
	} else {
		/* Fallback mode: return to the global per-type free list. */
		cnt->dcs_when_free = cnt->dcs_when_active;
		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
					   MLX5_COUNTER_TYPE_ORIGIN;
		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
				  cnt, next);
		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
	}
}
6348
6349 /**
6350  * Resize a meter id container.
6351  *
6352  * @param[in] dev
6353  *   Pointer to the Ethernet device structure.
6354  *
6355  * @return
6356  *   0 on success, otherwise negative errno value and rte_errno is set.
6357  */
6358 static int
6359 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6360 {
6361         struct mlx5_priv *priv = dev->data->dev_private;
6362         struct mlx5_aso_mtr_pools_mng *pools_mng =
6363                                 &priv->sh->mtrmng->pools_mng;
6364         void *old_pools = pools_mng->pools;
6365         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6366         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6367         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6368
6369         if (!pools) {
6370                 rte_errno = ENOMEM;
6371                 return -ENOMEM;
6372         }
6373         if (!pools_mng->n)
6374                 if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6375                         mlx5_free(pools);
6376                         return -ENOMEM;
6377                 }
6378         if (old_pools)
6379                 memcpy(pools, old_pools, pools_mng->n *
6380                                        sizeof(struct mlx5_aso_mtr_pool *));
6381         pools_mng->n = resize;
6382         pools_mng->pools = pools;
6383         if (old_pools)
6384                 mlx5_free(old_pools);
6385         return 0;
6386 }
6387
/**
 * Prepare a new meter and/or a new meter pool.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] mtr_free
 *   Where to put the pointer of a new free meter.
 *
 * @return
 *   The meter pool pointer and @mtr_free is set on success,
 *   NULL otherwise and rte_errno is set.
 */
static struct mlx5_aso_mtr_pool *
flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
			     struct mlx5_aso_mtr **mtr_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr_pools_mng *pools_mng =
				&priv->sh->mtrmng->pools_mng;
	struct mlx5_aso_mtr_pool *pool = NULL;
	struct mlx5_devx_obj *dcs = NULL;
	uint32_t i;
	uint32_t log_obj_size;

	/*
	 * Object log-size is half the pool size - presumably each ASO
	 * object element serves two meters (confirm against the PRM).
	 */
	log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
			priv->sh->pdn, log_obj_size);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		claim_zero(mlx5_devx_cmd_destroy(dcs));
		return NULL;
	}
	pool->devx_obj = dcs;
	pool->index = pools_mng->n_valid;
	/* Grow the pool-pointer array when all current slots are used. */
	if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
		mlx5_free(pool);
		claim_zero(mlx5_devx_cmd_destroy(dcs));
		return NULL;
	}
	pools_mng->pools[pool->index] = pool;
	pools_mng->n_valid++;
	/* Meters 1..N-1 go on the free list; meter 0 goes to the caller. */
	for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
		pool->mtrs[i].offset = i;
		LIST_INSERT_HEAD(&pools_mng->meters,
						&pool->mtrs[i], next);
	}
	pool->mtrs[0].offset = 0;
	*mtr_free = &pool->mtrs[0];
	return pool;
}
6443
6444 /**
6445  * Release a flow meter into pool.
6446  *
6447  * @param[in] dev
6448  *   Pointer to the Ethernet device structure.
6449  * @param[in] mtr_idx
6450  *   Index to aso flow meter.
6451  */
6452 static void
6453 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6454 {
6455         struct mlx5_priv *priv = dev->data->dev_private;
6456         struct mlx5_aso_mtr_pools_mng *pools_mng =
6457                                 &priv->sh->mtrmng->pools_mng;
6458         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6459
6460         MLX5_ASSERT(aso_mtr);
6461         rte_spinlock_lock(&pools_mng->mtrsl);
6462         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6463         aso_mtr->state = ASO_METER_FREE;
6464         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6465         rte_spinlock_unlock(&pools_mng->mtrsl);
6466 }
6467
/**
 * Allocate an ASO flow meter.
 *
 * Takes a meter from the free list, creating a new pool when the list is
 * empty, and lazily creates the DR ASO action on first use of a slot.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Index to aso flow meter on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_mtr_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr *mtr_free = NULL;
	struct mlx5_aso_mtr_pools_mng *pools_mng =
				&priv->sh->mtrmng->pools_mng;
	struct mlx5_aso_mtr_pool *pool;
	uint32_t mtr_idx = 0;

	/* ASO meters are DevX objects - DevX support is mandatory. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Allocate the flow meter memory. */
	/* Get free meters from management. */
	rte_spinlock_lock(&pools_mng->mtrsl);
	mtr_free = LIST_FIRST(&pools_mng->meters);
	if (mtr_free)
		LIST_REMOVE(mtr_free, next);
	if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
		/* Free list empty and a new pool could not be created. */
		rte_spinlock_unlock(&pools_mng->mtrsl);
		return 0;
	}
	/* Claim the slot before dropping the lock. */
	mtr_free->state = ASO_METER_WAIT;
	rte_spinlock_unlock(&pools_mng->mtrsl);
	pool = container_of(mtr_free,
			struct mlx5_aso_mtr_pool,
			mtrs[mtr_free->offset]);
	mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
	/* Create the DR ASO action only on the first use of this slot. */
	if (!mtr_free->fm.meter_action) {
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
		struct rte_flow_error error;
		uint8_t reg_id;

		reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
		mtr_free->fm.meter_action =
			mlx5_glue->dv_create_flow_action_aso
						(priv->sh->rx_domain,
						 pool->devx_obj->obj,
						 mtr_free->offset,
						 (1 << MLX5_FLOW_COLOR_GREEN),
						 reg_id - REG_C_0);
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
		if (!mtr_free->fm.meter_action) {
			/* No ASO support or action creation failed. */
			flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
			return 0;
		}
	}
	return mtr_idx;
}
6528
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Tunnel offload rule descriptor, or NULL for a regular rule.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[in] grp_info
 *   Group translation attributes used to map group to table.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and non root table.
 *   - 1 on success and root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct mlx5_flow_tunnel *tunnel,
			    const struct rte_flow_attr *attributes,
			    const struct flow_grp_info *grp_info,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
	int ret = 0;

#ifndef HAVE_MLX5DV_DR
	RTE_SET_USED(tunnel);
	RTE_SET_USED(grp_info);
	/* Without DR support only the root table (group 0) exists. */
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#else
	uint32_t table = 0;

	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
				       grp_info, error);
	if (ret)
		return ret;
	/* Table 0 is the root table; report that via the return value. */
	if (!table)
		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
	if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
	    attributes->priority > lowest_priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
	}
	/* Exactly one direction must be requested. */
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return ret;
}
6606
6607 static uint16_t
6608 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6609                           const struct rte_flow_item *end)
6610 {
6611         const struct rte_flow_item *item = *head;
6612         uint16_t l3_protocol;
6613
6614         for (; item != end; item++) {
6615                 switch (item->type) {
6616                 default:
6617                         break;
6618                 case RTE_FLOW_ITEM_TYPE_IPV4:
6619                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6620                         goto l3_ok;
6621                 case RTE_FLOW_ITEM_TYPE_IPV6:
6622                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6623                         goto l3_ok;
6624                 case RTE_FLOW_ITEM_TYPE_ETH:
6625                         if (item->mask && item->spec) {
6626                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6627                                                             type, item,
6628                                                             l3_protocol);
6629                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6630                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6631                                         goto l3_ok;
6632                         }
6633                         break;
6634                 case RTE_FLOW_ITEM_TYPE_VLAN:
6635                         if (item->mask && item->spec) {
6636                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6637                                                             inner_type, item,
6638                                                             l3_protocol);
6639                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6640                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6641                                         goto l3_ok;
6642                         }
6643                         break;
6644                 }
6645         }
6646         return 0;
6647 l3_ok:
6648         *head = item;
6649         return l3_protocol;
6650 }
6651
6652 static uint8_t
6653 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6654                           const struct rte_flow_item *end)
6655 {
6656         const struct rte_flow_item *item = *head;
6657         uint8_t l4_protocol;
6658
6659         for (; item != end; item++) {
6660                 switch (item->type) {
6661                 default:
6662                         break;
6663                 case RTE_FLOW_ITEM_TYPE_TCP:
6664                         l4_protocol = IPPROTO_TCP;
6665                         goto l4_ok;
6666                 case RTE_FLOW_ITEM_TYPE_UDP:
6667                         l4_protocol = IPPROTO_UDP;
6668                         goto l4_ok;
6669                 case RTE_FLOW_ITEM_TYPE_IPV4:
6670                         if (item->mask && item->spec) {
6671                                 const struct rte_flow_item_ipv4 *mask, *spec;
6672
6673                                 mask = (typeof(mask))item->mask;
6674                                 spec = (typeof(spec))item->spec;
6675                                 l4_protocol = mask->hdr.next_proto_id &
6676                                               spec->hdr.next_proto_id;
6677                                 if (l4_protocol == IPPROTO_TCP ||
6678                                     l4_protocol == IPPROTO_UDP)
6679                                         goto l4_ok;
6680                         }
6681                         break;
6682                 case RTE_FLOW_ITEM_TYPE_IPV6:
6683                         if (item->mask && item->spec) {
6684                                 const struct rte_flow_item_ipv6 *mask, *spec;
6685                                 mask = (typeof(mask))item->mask;
6686                                 spec = (typeof(spec))item->spec;
6687                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6688                                 if (l4_protocol == IPPROTO_TCP ||
6689                                     l4_protocol == IPPROTO_UDP)
6690                                         goto l4_ok;
6691                         }
6692                         break;
6693                 }
6694         }
6695         return 0;
6696 l4_ok:
6697         *head = item;
6698         return l4_protocol;
6699 }
6700
6701 static int
6702 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6703                                 const struct rte_flow_item *rule_items,
6704                                 const struct rte_flow_item *integrity_item,
6705                                 struct rte_flow_error *error)
6706 {
6707         struct mlx5_priv *priv = dev->data->dev_private;
6708         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6709         const struct rte_flow_item_integrity *mask = (typeof(mask))
6710                                                      integrity_item->mask;
6711         const struct rte_flow_item_integrity *spec = (typeof(spec))
6712                                                      integrity_item->spec;
6713         uint32_t protocol;
6714
6715         if (!priv->config.hca_attr.pkt_integrity_match)
6716                 return rte_flow_error_set(error, ENOTSUP,
6717                                           RTE_FLOW_ERROR_TYPE_ITEM,
6718                                           integrity_item,
6719                                           "packet integrity integrity_item not supported");
6720         if (!mask)
6721                 mask = &rte_flow_item_integrity_mask;
6722         if (!mlx5_validate_integrity_item(mask))
6723                 return rte_flow_error_set(error, ENOTSUP,
6724                                           RTE_FLOW_ERROR_TYPE_ITEM,
6725                                           integrity_item,
6726                                           "unsupported integrity filter");
6727         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
6728         if (spec->level > 1) {
6729                 if (!tunnel_item)
6730                         return rte_flow_error_set(error, ENOTSUP,
6731                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6732                                                   integrity_item,
6733                                                   "missing tunnel item");
6734                 item = tunnel_item;
6735                 end_item = mlx5_find_end_item(tunnel_item);
6736         } else {
6737                 end_item = tunnel_item ? tunnel_item :
6738                            mlx5_find_end_item(integrity_item);
6739         }
6740         if (mask->l3_ok || mask->ipv4_csum_ok) {
6741                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6742                 if (!protocol)
6743                         return rte_flow_error_set(error, EINVAL,
6744                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6745                                                   integrity_item,
6746                                                   "missing L3 protocol");
6747         }
6748         if (mask->l4_ok || mask->l4_csum_ok) {
6749                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6750                 if (!protocol)
6751                         return rte_flow_error_set(error, EINVAL,
6752                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6753                                                   integrity_item,
6754                                                   "missing L4 protocol");
6755         }
6756         return 0;
6757 }
6758
6759 /**
6760  * Internal validation function. For validating both actions and items.
6761  *
6762  * @param[in] dev
6763  *   Pointer to the rte_eth_dev structure.
6764  * @param[in] attr
6765  *   Pointer to the flow attributes.
6766  * @param[in] items
6767  *   Pointer to the list of items.
6768  * @param[in] actions
6769  *   Pointer to the list of actions.
6770  * @param[in] external
6771  *   This flow rule is created by request external to PMD.
6772  * @param[in] hairpin
6773  *   Number of hairpin TX actions, 0 means classic flow.
6774  * @param[out] error
6775  *   Pointer to the error structure.
6776  *
6777  * @return
6778  *   0 on success, a negative errno value otherwise and rte_errno is set.
6779  */
6780 static int
6781 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6782                  const struct rte_flow_item items[],
6783                  const struct rte_flow_action actions[],
6784                  bool external, int hairpin, struct rte_flow_error *error)
6785 {
6786         int ret;
6787         uint64_t action_flags = 0;
6788         uint64_t item_flags = 0;
6789         uint64_t last_item = 0;
6790         uint8_t next_protocol = 0xff;
6791         uint16_t ether_type = 0;
6792         int actions_n = 0;
6793         uint8_t item_ipv6_proto = 0;
6794         int fdb_mirror_limit = 0;
6795         int modify_after_mirror = 0;
6796         const struct rte_flow_item *geneve_item = NULL;
6797         const struct rte_flow_item *gre_item = NULL;
6798         const struct rte_flow_item *gtp_item = NULL;
6799         const struct rte_flow_action_raw_decap *decap;
6800         const struct rte_flow_action_raw_encap *encap;
6801         const struct rte_flow_action_rss *rss = NULL;
6802         const struct rte_flow_action_rss *sample_rss = NULL;
6803         const struct rte_flow_action_count *sample_count = NULL;
6804         const struct rte_flow_item_tcp nic_tcp_mask = {
6805                 .hdr = {
6806                         .tcp_flags = 0xFF,
6807                         .src_port = RTE_BE16(UINT16_MAX),
6808                         .dst_port = RTE_BE16(UINT16_MAX),
6809                 }
6810         };
6811         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6812                 .hdr = {
6813                         .src_addr =
6814                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6815                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6816                         .dst_addr =
6817                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6818                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6819                         .vtc_flow = RTE_BE32(0xffffffff),
6820                         .proto = 0xff,
6821                         .hop_limits = 0xff,
6822                 },
6823                 .has_frag_ext = 1,
6824         };
6825         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6826                 .hdr = {
6827                         .common = {
6828                                 .u32 =
6829                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6830                                         .type = 0xFF,
6831                                         }).u32),
6832                         },
6833                         .dummy[0] = 0xffffffff,
6834                 },
6835         };
6836         struct mlx5_priv *priv = dev->data->dev_private;
6837         struct mlx5_dev_config *dev_conf = &priv->config;
6838         uint16_t queue_index = 0xFFFF;
6839         const struct rte_flow_item_vlan *vlan_m = NULL;
6840         uint32_t rw_act_num = 0;
6841         uint64_t is_root;
6842         const struct mlx5_flow_tunnel *tunnel;
6843         enum mlx5_tof_rule_type tof_rule_type;
6844         struct flow_grp_info grp_info = {
6845                 .external = !!external,
6846                 .transfer = !!attr->transfer,
6847                 .fdb_def_rule = !!priv->fdb_def_rule,
6848                 .std_tbl_fix = true,
6849         };
6850         const struct rte_eth_hairpin_conf *conf;
6851         const struct rte_flow_item *rule_items = items;
6852         const struct rte_flow_item *port_id_item = NULL;
6853         bool def_policy = false;
6854
	if (items == NULL)
		return -1;
	/*
	 * Detect a tunnel-offload rule (set or match) up front; it pre-seeds
	 * action_flags and may override the standard table-id fixup.
	 */
	tunnel = is_tunnel_offload_active(dev) ?
		 mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
	if (tunnel) {
		if (priv->representor)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "decap not supported for VF representor");
		if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
			action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
		else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
			action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
					MLX5_FLOW_ACTION_DECAP;
		grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
					(dev, attr, tunnel, tof_rule_type);
	}
	ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
	if (ret < 0)
		return ret;
	/* Non-negative return carries the root-table flag. */
	is_root = (uint64_t)ret;
	/*
	 * First pass: validate every pattern item in order and accumulate
	 * layer bits into item_flags. last_item carries the bit of the item
	 * just validated; ether_type/next_protocol carry the masked protocol
	 * values forward so each L3/L4 item can be cross-checked against the
	 * preceding layer.
	 */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		/* Shadows the outer 'tunnel': 1 once a tunnel layer was seen. */
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int type = items->type;

		if (!mlx5_flow_os_item_supported(type))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		switch (type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_PORT_ID:
			ret = flow_dv_validate_item_port_id
					(dev, items, attr, item_flags, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_PORT_ID;
			port_id_item = items;
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  true, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			/* Remember the masked EtherType for later L3 checks. */
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = flow_dv_validate_item_vlan(items, item_flags,
							 dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					     MLX5_FLOW_LAYER_OUTER_VLAN;
			/* VLAN's inner_type supersedes the ETH EtherType. */
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			/* Store outer VLAN mask for of_push_vlan action. */
			if (!tunnel)
				vlan_m = items->mask;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			mlx5_flow_tunnel_ip_check(items, next_protocol,
						  &item_flags, &tunnel);
			ret = flow_dv_validate_item_ipv4(items, item_flags,
							 last_item, ether_type,
							 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			/* Track masked next-protocol for the L4/tunnel items. */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			mlx5_flow_tunnel_ip_check(items, next_protocol,
						  &item_flags, &tunnel);
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type,
							   &nic_ipv6_mask,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				item_ipv6_proto =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
			ret = flow_dv_validate_item_ipv6_frag_ext(items,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ?
					MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
					MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6_frag_ext *)
			     items->mask)->hdr.next_header) {
				next_protocol =
				((const struct rte_flow_item_ipv6_frag_ext *)
				 items->spec)->hdr.next_header;
				next_protocol &=
				((const struct rte_flow_item_ipv6_frag_ext *)
				 items->mask)->hdr.next_header;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &nic_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			/* Saved so a following GRE_KEY item can be checked. */
			gre_item = items;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_nvgre(items, item_flags,
							    next_protocol,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_NVGRE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
			ret = mlx5_flow_validate_item_gre_key
				(items, item_flags, gre_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE_KEY;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(dev, items,
							    item_flags, attr,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags, dev,
								error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			ret = mlx5_flow_validate_item_geneve(items,
							     item_flags, dev,
							     error);
			if (ret < 0)
				return ret;
			/* Saved so a following GENEVE_OPT item can be checked. */
			geneve_item = items;
			last_item = MLX5_FLOW_LAYER_GENEVE;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
			ret = mlx5_flow_validate_item_geneve_opt(items,
								 last_item,
								 geneve_item,
								 dev,
								 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;

		case RTE_FLOW_ITEM_TYPE_MARK:
			ret = flow_dv_validate_item_mark(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_MARK;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			ret = mlx5_flow_validate_item_icmp(items, item_flags,
							   next_protocol,
							   error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_ICMP;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			ret = mlx5_flow_validate_item_icmp6(items, item_flags,
							    next_protocol,
							    error);
			if (ret < 0)
				return ret;
			item_ipv6_proto = IPPROTO_ICMPV6;
			last_item = MLX5_FLOW_LAYER_ICMP6;
			break;
		case RTE_FLOW_ITEM_TYPE_TAG:
			ret = flow_dv_validate_item_tag(dev, items,
							attr, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_TAG;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
			/* PMD-internal items: accepted without validation. */
			break;
		case RTE_FLOW_ITEM_TYPE_GTP:
			ret = flow_dv_validate_item_gtp(dev, items, item_flags,
							error);
			if (ret < 0)
				return ret;
			/* Saved so a following GTP_PSC item can be checked. */
			gtp_item = items;
			last_item = MLX5_FLOW_LAYER_GTP;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			ret = flow_dv_validate_item_gtp_psc(items, last_item,
							    gtp_item, attr,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GTP_PSC;
			break;
		case RTE_FLOW_ITEM_TYPE_ECPRI:
			/* Capacity will be checked in the translate stage. */
			ret = mlx5_flow_validate_item_ecpri(items, item_flags,
							    last_item,
							    ether_type,
							    &nic_ecpri_mask,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_ECPRI;
			break;
		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
			if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
				return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ITEM,
					 NULL, "multiple integrity items not supported");
			/* Needs the whole pattern, hence rule_items head. */
			ret = flow_dv_validate_item_integrity(dev, rule_items,
							      items, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_INTEGRITY;
			break;
		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
			/* Validator updates item_flags itself; no last_item. */
			ret = flow_dv_validate_item_aso_ct(dev, items,
							   &item_flags, error);
			if (ret < 0)
				return ret;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
			/* tunnel offload item was processed before
			 * list it here as a supported type
			 */
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	/*
	 * Second pass: validate every action in order, accumulating
	 * action_flags, the action count (actions_n) and the number of
	 * modify-header register writes (rw_act_num). Note: this switch
	 * continues past the end of this view.
	 */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int type = actions->type;
		bool shared_count = false;

		if (!mlx5_flow_os_action_supported(type))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		/* A terminating meter policy must not be followed by anything. */
		if (action_flags &
			MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
			return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "meter action with policy "
				"must be the last action");
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
							      action_flags,
							      actions,
							      attr,
							      error);
			if (ret)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = flow_dv_validate_action_flag(dev, action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/* Extended metadata mode implements FLAG via a
			 * modify-header action; legacy mode uses a plain action.
			 */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
				/* Count all modify-header actions as one. */
				if (!(action_flags &
				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
					++actions_n;
				action_flags |= MLX5_FLOW_ACTION_FLAG |
						MLX5_FLOW_ACTION_MARK_EXT;
				if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
					modify_after_mirror = 1;

			} else {
				action_flags |= MLX5_FLOW_ACTION_FLAG;
				++actions_n;
			}
			rw_act_num += MLX5_ACT_NUM_SET_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = flow_dv_validate_action_mark(dev, actions,
							   action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
				/* Count all modify-header actions as one. */
				if (!(action_flags &
				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
					++actions_n;
				action_flags |= MLX5_FLOW_ACTION_MARK |
						MLX5_FLOW_ACTION_MARK_EXT;
				if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
					modify_after_mirror = 1;
			} else {
				action_flags |= MLX5_FLOW_ACTION_MARK;
				++actions_n;
			}
			rw_act_num += MLX5_ACT_NUM_SET_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_META:
			ret = flow_dv_validate_action_set_meta(dev, actions,
							       action_flags,
							       attr, error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_SET_META;
			rw_act_num += MLX5_ACT_NUM_SET_META;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TAG:
			ret = flow_dv_validate_action_set_tag(dev, actions,
							      action_flags,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
			rw_act_num += MLX5_ACT_NUM_SET_TAG;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Remember the target queue for later checks. */
			queue_index = ((const struct rte_flow_action_queue *)
							(actions->conf))->index;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			rss = actions->conf;
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			/* RSS inside a sample action must agree with this one. */
			if (rss && sample_rss &&
			    (sample_rss->level != rss->level ||
			    sample_rss->types != rss->types))
				return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					NULL,
					"Can't use the different RSS types "
					"or level in the same flow");
			if (rss != NULL && rss->queue_num)
				queue_index = rss->queue[0];
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
			ret =
			mlx5_flow_validate_action_default_miss(action_flags,
					attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
			++actions_n;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
		case RTE_FLOW_ACTION_TYPE_COUNT:
			shared_count = is_shared_action_count(actions);
			ret = flow_dv_validate_action_count(dev, shared_count,
							    action_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
			if (flow_dv_validate_action_pop_vlan(dev,
							     action_flags,
							     actions,
							     item_flags, attr,
							     error))
				return -rte_errno;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			/* vlan_m is the outer VLAN item mask saved above. */
			ret = flow_dv_validate_action_push_vlan(dev,
								action_flags,
								vlan_m,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			ret = flow_dv_validate_action_set_vlan_pcp
						(action_flags, actions, error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			/* Count PCP with push_vlan command. */
			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			ret = flow_dv_validate_action_set_vlan_vid
						(item_flags, action_flags,
						 actions, error);
			if (ret < 0)
				return ret;
			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
				modify_after_mirror = 1;
			/* Count VID with push_vlan command. */
			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
			rw_act_num += MLX5_ACT_NUM_MDF_VID;
			break;
7408                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7409                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7410                         ret = flow_dv_validate_action_l2_encap(dev,
7411                                                                action_flags,
7412                                                                actions, attr,
7413                                                                error);
7414                         if (ret < 0)
7415                                 return ret;
7416                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7417                         ++actions_n;
7418                         break;
7419                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7420                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7421                         ret = flow_dv_validate_action_decap(dev, action_flags,
7422                                                             actions, item_flags,
7423                                                             attr, error);
7424                         if (ret < 0)
7425                                 return ret;
7426                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7427                                 modify_after_mirror = 1;
7428                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7429                         ++actions_n;
7430                         break;
7431                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7432                         ret = flow_dv_validate_action_raw_encap_decap
7433                                 (dev, NULL, actions->conf, attr, &action_flags,
7434                                  &actions_n, actions, item_flags, error);
7435                         if (ret < 0)
7436                                 return ret;
7437                         break;
7438                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7439                         decap = actions->conf;
7440                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7441                                 ;
7442                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7443                                 encap = NULL;
7444                                 actions--;
7445                         } else {
7446                                 encap = actions->conf;
7447                         }
7448                         ret = flow_dv_validate_action_raw_encap_decap
7449                                            (dev,
7450                                             decap ? decap : &empty_decap, encap,
7451                                             attr, &action_flags, &actions_n,
7452                                             actions, item_flags, error);
7453                         if (ret < 0)
7454                                 return ret;
7455                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7456                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7457                                 modify_after_mirror = 1;
7458                         break;
7459                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7460                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7461                         ret = flow_dv_validate_action_modify_mac(action_flags,
7462                                                                  actions,
7463                                                                  item_flags,
7464                                                                  error);
7465                         if (ret < 0)
7466                                 return ret;
7467                         /* Count all modify-header actions as one action. */
7468                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7469                                 ++actions_n;
7470                         action_flags |= actions->type ==
7471                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7472                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7473                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7474                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7475                                 modify_after_mirror = 1;
7476                         /*
7477                          * Even if the source and destination MAC addresses have
7478                          * overlap in the header with 4B alignment, the convert
7479                          * function will handle them separately and 4 SW actions
7480                          * will be created. And 2 actions will be added each
7481                          * time no matter how many bytes of address will be set.
7482                          */
7483                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7484                         break;
7485                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7486                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7487                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7488                                                                   actions,
7489                                                                   item_flags,
7490                                                                   error);
7491                         if (ret < 0)
7492                                 return ret;
7493                         /* Count all modify-header actions as one action. */
7494                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7495                                 ++actions_n;
7496                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7497                                 modify_after_mirror = 1;
7498                         action_flags |= actions->type ==
7499                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7500                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7501                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7502                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7503                         break;
7504                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7505                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7506                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7507                                                                   actions,
7508                                                                   item_flags,
7509                                                                   error);
7510                         if (ret < 0)
7511                                 return ret;
7512                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7513                                 return rte_flow_error_set(error, ENOTSUP,
7514                                         RTE_FLOW_ERROR_TYPE_ACTION,
7515                                         actions,
7516                                         "Can't change header "
7517                                         "with ICMPv6 proto");
7518                         /* Count all modify-header actions as one action. */
7519                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7520                                 ++actions_n;
7521                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7522                                 modify_after_mirror = 1;
7523                         action_flags |= actions->type ==
7524                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7525                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7526                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7527                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7528                         break;
7529                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7530                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7531                         ret = flow_dv_validate_action_modify_tp(action_flags,
7532                                                                 actions,
7533                                                                 item_flags,
7534                                                                 error);
7535                         if (ret < 0)
7536                                 return ret;
7537                         /* Count all modify-header actions as one action. */
7538                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7539                                 ++actions_n;
7540                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7541                                 modify_after_mirror = 1;
7542                         action_flags |= actions->type ==
7543                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7544                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7545                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7546                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7547                         break;
7548                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7549                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7550                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7551                                                                  actions,
7552                                                                  item_flags,
7553                                                                  error);
7554                         if (ret < 0)
7555                                 return ret;
7556                         /* Count all modify-header actions as one action. */
7557                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7558                                 ++actions_n;
7559                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7560                                 modify_after_mirror = 1;
7561                         action_flags |= actions->type ==
7562                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7563                                                 MLX5_FLOW_ACTION_SET_TTL :
7564                                                 MLX5_FLOW_ACTION_DEC_TTL;
7565                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7566                         break;
7567                 case RTE_FLOW_ACTION_TYPE_JUMP:
7568                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7569                                                            action_flags,
7570                                                            attr, external,
7571                                                            error);
7572                         if (ret)
7573                                 return ret;
7574                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7575                             fdb_mirror_limit)
7576                                 return rte_flow_error_set(error, EINVAL,
7577                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7578                                                   NULL,
7579                                                   "sample and jump action combination is not supported");
7580                         ++actions_n;
7581                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7582                         break;
7583                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7584                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7585                         ret = flow_dv_validate_action_modify_tcp_seq
7586                                                                 (action_flags,
7587                                                                  actions,
7588                                                                  item_flags,
7589                                                                  error);
7590                         if (ret < 0)
7591                                 return ret;
7592                         /* Count all modify-header actions as one action. */
7593                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7594                                 ++actions_n;
7595                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7596                                 modify_after_mirror = 1;
7597                         action_flags |= actions->type ==
7598                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7599                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7600                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7601                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7602                         break;
7603                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7604                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7605                         ret = flow_dv_validate_action_modify_tcp_ack
7606                                                                 (action_flags,
7607                                                                  actions,
7608                                                                  item_flags,
7609                                                                  error);
7610                         if (ret < 0)
7611                                 return ret;
7612                         /* Count all modify-header actions as one action. */
7613                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7614                                 ++actions_n;
7615                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7616                                 modify_after_mirror = 1;
7617                         action_flags |= actions->type ==
7618                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7619                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7620                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7621                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7622                         break;
7623                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7624                         break;
7625                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7626                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7627                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7628                         break;
7629                 case RTE_FLOW_ACTION_TYPE_METER:
7630                         ret = mlx5_flow_validate_action_meter(dev,
7631                                                               action_flags,
7632                                                               actions, attr,
7633                                                               port_id_item,
7634                                                               &def_policy,
7635                                                               error);
7636                         if (ret < 0)
7637                                 return ret;
7638                         action_flags |= MLX5_FLOW_ACTION_METER;
7639                         if (!def_policy)
7640                                 action_flags |=
7641                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7642                         ++actions_n;
7643                         /* Meter action will add one more TAG action. */
7644                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7645                         break;
7646                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7647                         if (!attr->transfer && !attr->group)
7648                                 return rte_flow_error_set(error, ENOTSUP,
7649                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7650                                                                            NULL,
7651                           "Shared ASO age action is not supported for group 0");
7652                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7653                                 return rte_flow_error_set
7654                                                   (error, EINVAL,
7655                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7656                                                    NULL,
7657                                                    "duplicate age actions set");
7658                         action_flags |= MLX5_FLOW_ACTION_AGE;
7659                         ++actions_n;
7660                         break;
7661                 case RTE_FLOW_ACTION_TYPE_AGE:
7662                         ret = flow_dv_validate_action_age(action_flags,
7663                                                           actions, dev,
7664                                                           error);
7665                         if (ret < 0)
7666                                 return ret;
7667                         /*
7668                          * Validate the regular AGE action (using counter)
7669                          * mutual exclusion with share counter actions.
7670                          */
7671                         if (!priv->sh->flow_hit_aso_en) {
7672                                 if (shared_count)
7673                                         return rte_flow_error_set
7674                                                 (error, EINVAL,
7675                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7676                                                 NULL,
7677                                                 "old age and shared count combination is not supported");
7678                                 if (sample_count)
7679                                         return rte_flow_error_set
7680                                                 (error, EINVAL,
7681                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7682                                                 NULL,
7683                                                 "old age action and count must be in the same sub flow");
7684                         }
7685                         action_flags |= MLX5_FLOW_ACTION_AGE;
7686                         ++actions_n;
7687                         break;
7688                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7689                         ret = flow_dv_validate_action_modify_ipv4_dscp
7690                                                          (action_flags,
7691                                                           actions,
7692                                                           item_flags,
7693                                                           error);
7694                         if (ret < 0)
7695                                 return ret;
7696                         /* Count all modify-header actions as one action. */
7697                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7698                                 ++actions_n;
7699                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7700                                 modify_after_mirror = 1;
7701                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7702                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7703                         break;
7704                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7705                         ret = flow_dv_validate_action_modify_ipv6_dscp
7706                                                                 (action_flags,
7707                                                                  actions,
7708                                                                  item_flags,
7709                                                                  error);
7710                         if (ret < 0)
7711                                 return ret;
7712                         /* Count all modify-header actions as one action. */
7713                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7714                                 ++actions_n;
7715                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7716                                 modify_after_mirror = 1;
7717                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7718                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7719                         break;
7720                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7721                         ret = flow_dv_validate_action_sample(&action_flags,
7722                                                              actions, dev,
7723                                                              attr, item_flags,
7724                                                              rss, &sample_rss,
7725                                                              &sample_count,
7726                                                              &fdb_mirror_limit,
7727                                                              error);
7728                         if (ret < 0)
7729                                 return ret;
7730                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7731                         ++actions_n;
7732                         break;
7733                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7734                         ret = flow_dv_validate_action_modify_field(dev,
7735                                                                    action_flags,
7736                                                                    actions,
7737                                                                    attr,
7738                                                                    error);
7739                         if (ret < 0)
7740                                 return ret;
7741                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7742                                 modify_after_mirror = 1;
7743                         /* Count all modify-header actions as one action. */
7744                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7745                                 ++actions_n;
7746                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7747                         rw_act_num += ret;
7748                         break;
7749                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7750                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7751                                                              item_flags, attr,
7752                                                              error);
7753                         if (ret < 0)
7754                                 return ret;
7755                         action_flags |= MLX5_FLOW_ACTION_CT;
7756                         break;
7757                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7758                         /* tunnel offload action was processed before
7759                          * list it here as a supported type
7760                          */
7761                         break;
7762                 default:
7763                         return rte_flow_error_set(error, ENOTSUP,
7764                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7765                                                   actions,
7766                                                   "action not supported");
7767                 }
7768         }
7769         /*
7770          * Validate actions in flow rules
7771          * - Explicit decap action is prohibited by the tunnel offload API.
7772          * - Drop action in tunnel steer rule is prohibited by the API.
	 * - Application cannot use MARK action because its value can mask
	 *   tunnel default miss notification.
7775          * - JUMP in tunnel match rule has no support in current PMD
7776          *   implementation.
7777          * - TAG & META are reserved for future uses.
7778          */
7779         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7780                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7781                                             MLX5_FLOW_ACTION_MARK     |
7782                                             MLX5_FLOW_ACTION_SET_TAG  |
7783                                             MLX5_FLOW_ACTION_SET_META |
7784                                             MLX5_FLOW_ACTION_DROP;
7785
7786                 if (action_flags & bad_actions_mask)
7787                         return rte_flow_error_set
7788                                         (error, EINVAL,
7789                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7790                                         "Invalid RTE action in tunnel "
7791                                         "set decap rule");
7792                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7793                         return rte_flow_error_set
7794                                         (error, EINVAL,
7795                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7796                                         "tunnel set decap rule must terminate "
7797                                         "with JUMP");
7798                 if (!attr->ingress)
7799                         return rte_flow_error_set
7800                                         (error, EINVAL,
7801                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7802                                         "tunnel flows for ingress traffic only");
7803         }
7804         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7805                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7806                                             MLX5_FLOW_ACTION_MARK    |
7807                                             MLX5_FLOW_ACTION_SET_TAG |
7808                                             MLX5_FLOW_ACTION_SET_META;
7809
7810                 if (action_flags & bad_actions_mask)
7811                         return rte_flow_error_set
7812                                         (error, EINVAL,
7813                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7814                                         "Invalid RTE action in tunnel "
7815                                         "set match rule");
7816         }
7817         /*
7818          * Validate the drop action mutual exclusion with other actions.
7819          * Drop action is mutually-exclusive with any other action, except for
7820          * Count action.
7821          * Drop action compatibility with tunnel offload was already validated.
7822          */
7823         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7824                             MLX5_FLOW_ACTION_TUNNEL_MATCH));
7825         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7826             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7827                 return rte_flow_error_set(error, EINVAL,
7828                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7829                                           "Drop action is mutually-exclusive "
7830                                           "with any other action, except for "
7831                                           "Count action");
7832         /* Eswitch has few restrictions on using items and actions */
7833         if (attr->transfer) {
7834                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7835                     action_flags & MLX5_FLOW_ACTION_FLAG)
7836                         return rte_flow_error_set(error, ENOTSUP,
7837                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7838                                                   NULL,
7839                                                   "unsupported action FLAG");
7840                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7841                     action_flags & MLX5_FLOW_ACTION_MARK)
7842                         return rte_flow_error_set(error, ENOTSUP,
7843                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7844                                                   NULL,
7845                                                   "unsupported action MARK");
7846                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7847                         return rte_flow_error_set(error, ENOTSUP,
7848                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7849                                                   NULL,
7850                                                   "unsupported action QUEUE");
7851                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7852                         return rte_flow_error_set(error, ENOTSUP,
7853                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7854                                                   NULL,
7855                                                   "unsupported action RSS");
7856                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7857                         return rte_flow_error_set(error, EINVAL,
7858                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7859                                                   actions,
7860                                                   "no fate action is found");
7861         } else {
7862                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7863                         return rte_flow_error_set(error, EINVAL,
7864                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7865                                                   actions,
7866                                                   "no fate action is found");
7867         }
7868         /*
7869          * Continue validation for Xcap and VLAN actions.
7870          * If hairpin is working in explicit TX rule mode, there is no actions
7871          * splitting and the validation of hairpin ingress flow should be the
7872          * same as other standard flows.
7873          */
7874         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7875                              MLX5_FLOW_VLAN_ACTIONS)) &&
7876             (queue_index == 0xFFFF ||
7877              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7878              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7879              conf->tx_explicit != 0))) {
7880                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7881                     MLX5_FLOW_XCAP_ACTIONS)
7882                         return rte_flow_error_set(error, ENOTSUP,
7883                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7884                                                   NULL, "encap and decap "
7885                                                   "combination aren't supported");
7886                 if (!attr->transfer && attr->ingress) {
7887                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7888                                 return rte_flow_error_set
7889                                                 (error, ENOTSUP,
7890                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7891                                                  NULL, "encap is not supported"
7892                                                  " for ingress traffic");
7893                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7894                                 return rte_flow_error_set
7895                                                 (error, ENOTSUP,
7896                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7897                                                  NULL, "push VLAN action not "
7898                                                  "supported for ingress");
7899                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7900                                         MLX5_FLOW_VLAN_ACTIONS)
7901                                 return rte_flow_error_set
7902                                                 (error, ENOTSUP,
7903                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7904                                                  NULL, "no support for "
7905                                                  "multiple VLAN actions");
7906                 }
7907         }
7908         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7909                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7910                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7911                         attr->ingress)
7912                         return rte_flow_error_set
7913                                 (error, ENOTSUP,
7914                                 RTE_FLOW_ERROR_TYPE_ACTION,
7915                                 NULL, "fate action not supported for "
7916                                 "meter with policy");
7917                 if (attr->egress) {
7918                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7919                                 return rte_flow_error_set
7920                                         (error, ENOTSUP,
7921                                         RTE_FLOW_ERROR_TYPE_ACTION,
7922                                         NULL, "modify header action in egress "
7923                                         "cannot be done before meter action");
7924                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7925                                 return rte_flow_error_set
7926                                         (error, ENOTSUP,
7927                                         RTE_FLOW_ERROR_TYPE_ACTION,
7928                                         NULL, "encap action in egress "
7929                                         "cannot be done before meter action");
7930                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7931                                 return rte_flow_error_set
7932                                         (error, ENOTSUP,
7933                                         RTE_FLOW_ERROR_TYPE_ACTION,
7934                                         NULL, "push vlan action in egress "
7935                                         "cannot be done before meter action");
7936                 }
7937         }
7938         /*
7939          * Hairpin flow will add one more TAG action in TX implicit mode.
7940          * In TX explicit mode, there will be no hairpin flow ID.
7941          */
7942         if (hairpin > 0)
7943                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7944         /* extra metadata enabled: one more TAG action will be add. */
7945         if (dev_conf->dv_flow_en &&
7946             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7947             mlx5_flow_ext_mreg_supported(dev))
7948                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7949         if (rw_act_num >
7950                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7951                 return rte_flow_error_set(error, ENOTSUP,
7952                                           RTE_FLOW_ERROR_TYPE_ACTION,
7953                                           NULL, "too many header modify"
7954                                           " actions to support");
7955         }
7956         /* Eswitch egress mirror and modify flow has limitation on CX5 */
7957         if (fdb_mirror_limit && modify_after_mirror)
7958                 return rte_flow_error_set(error, EINVAL,
7959                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7960                                 "sample before modify action is not supported");
7961         return 0;
7962 }
7963
7964 /**
7965  * Internal preparation function. Allocates the DV flow size,
7966  * this size is constant.
7967  *
7968  * @param[in] dev
7969  *   Pointer to the rte_eth_dev structure.
7970  * @param[in] attr
7971  *   Pointer to the flow attributes.
7972  * @param[in] items
7973  *   Pointer to the list of items.
7974  * @param[in] actions
7975  *   Pointer to the list of actions.
7976  * @param[out] error
7977  *   Pointer to the error structure.
7978  *
7979  * @return
7980  *   Pointer to mlx5_flow object on success,
7981  *   otherwise NULL and rte_errno is set.
7982  */
7983 static struct mlx5_flow *
7984 flow_dv_prepare(struct rte_eth_dev *dev,
7985                 const struct rte_flow_attr *attr __rte_unused,
7986                 const struct rte_flow_item items[] __rte_unused,
7987                 const struct rte_flow_action actions[] __rte_unused,
7988                 struct rte_flow_error *error)
7989 {
7990         uint32_t handle_idx = 0;
7991         struct mlx5_flow *dev_flow;
7992         struct mlx5_flow_handle *dev_handle;
7993         struct mlx5_priv *priv = dev->data->dev_private;
7994         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7995
7996         MLX5_ASSERT(wks);
7997         wks->skip_matcher_reg = 0;
7998         wks->policy = NULL;
7999         wks->final_policy = NULL;
8000         /* In case of corrupting the memory. */
8001         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
8002                 rte_flow_error_set(error, ENOSPC,
8003                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8004                                    "not free temporary device flow");
8005                 return NULL;
8006         }
8007         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
8008                                    &handle_idx);
8009         if (!dev_handle) {
8010                 rte_flow_error_set(error, ENOMEM,
8011                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8012                                    "not enough memory to create flow handle");
8013                 return NULL;
8014         }
8015         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
8016         dev_flow = &wks->flows[wks->flow_idx++];
8017         memset(dev_flow, 0, sizeof(*dev_flow));
8018         dev_flow->handle = dev_handle;
8019         dev_flow->handle_idx = handle_idx;
8020         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
8021         dev_flow->ingress = attr->ingress;
8022         dev_flow->dv.transfer = attr->transfer;
8023         return dev_flow;
8024 }
8025
8026 #ifdef RTE_LIBRTE_MLX5_DEBUG
8027 /**
8028  * Sanity check for match mask and value. Similar to check_valid_spec() in
8029  * kernel driver. If unmasked bit is present in value, it returns failure.
8030  *
8031  * @param match_mask
8032  *   pointer to match mask buffer.
8033  * @param match_value
8034  *   pointer to match value buffer.
8035  *
8036  * @return
8037  *   0 if valid, -EINVAL otherwise.
8038  */
8039 static int
8040 flow_dv_check_valid_spec(void *match_mask, void *match_value)
8041 {
8042         uint8_t *m = match_mask;
8043         uint8_t *v = match_value;
8044         unsigned int i;
8045
8046         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
8047                 if (v[i] & ~m[i]) {
8048                         DRV_LOG(ERR,
8049                                 "match_value differs from match_criteria"
8050                                 " %p[%u] != %p[%u]",
8051                                 match_value, i, match_mask, i);
8052                         return -EINVAL;
8053                 }
8054         }
8055         return 0;
8056 }
8057 #endif
8058
8059 /**
8060  * Add match of ip_version.
8061  *
8062  * @param[in] group
8063  *   Flow group.
8064  * @param[in] headers_v
8065  *   Values header pointer.
8066  * @param[in] headers_m
8067  *   Masks header pointer.
8068  * @param[in] ip_version
8069  *   The IP version to set.
8070  */
8071 static inline void
8072 flow_dv_set_match_ip_version(uint32_t group,
8073                              void *headers_v,
8074                              void *headers_m,
8075                              uint8_t ip_version)
8076 {
8077         if (group == 0)
8078                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8079         else
8080                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8081                          ip_version);
8082         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8083         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8084         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8085 }
8086
8087 /**
8088  * Add Ethernet item to matcher and to the value.
8089  *
8090  * @param[in, out] matcher
8091  *   Flow matcher.
8092  * @param[in, out] key
8093  *   Flow matcher value.
8094  * @param[in] item
8095  *   Flow pattern to translate.
8096  * @param[in] inner
8097  *   Item is inner pattern.
8098  */
8099 static void
8100 flow_dv_translate_item_eth(void *matcher, void *key,
8101                            const struct rte_flow_item *item, int inner,
8102                            uint32_t group)
8103 {
8104         const struct rte_flow_item_eth *eth_m = item->mask;
8105         const struct rte_flow_item_eth *eth_v = item->spec;
8106         const struct rte_flow_item_eth nic_mask = {
8107                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8108                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8109                 .type = RTE_BE16(0xffff),
8110                 .has_vlan = 0,
8111         };
8112         void *hdrs_m;
8113         void *hdrs_v;
8114         char *l24_v;
8115         unsigned int i;
8116
8117         if (!eth_v)
8118                 return;
8119         if (!eth_m)
8120                 eth_m = &nic_mask;
8121         if (inner) {
8122                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8123                                          inner_headers);
8124                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8125         } else {
8126                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8127                                          outer_headers);
8128                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8129         }
8130         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8131                &eth_m->dst, sizeof(eth_m->dst));
8132         /* The value must be in the range of the mask. */
8133         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8134         for (i = 0; i < sizeof(eth_m->dst); ++i)
8135                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8136         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8137                &eth_m->src, sizeof(eth_m->src));
8138         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8139         /* The value must be in the range of the mask. */
8140         for (i = 0; i < sizeof(eth_m->dst); ++i)
8141                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8142         /*
8143          * HW supports match on one Ethertype, the Ethertype following the last
8144          * VLAN tag of the packet (see PRM).
8145          * Set match on ethertype only if ETH header is not followed by VLAN.
8146          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8147          * ethertype, and use ip_version field instead.
8148          * eCPRI over Ether layer will use type value 0xAEFE.
8149          */
8150         if (eth_m->type == 0xFFFF) {
8151                 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
8152                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8153                 switch (eth_v->type) {
8154                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8155                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8156                         return;
8157                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8158                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8159                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8160                         return;
8161                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8162                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8163                         return;
8164                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8165                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8166                         return;
8167                 default:
8168                         break;
8169                 }
8170         }
8171         if (eth_m->has_vlan) {
8172                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8173                 if (eth_v->has_vlan) {
8174                         /*
8175                          * Here, when also has_more_vlan field in VLAN item is
8176                          * not set, only single-tagged packets will be matched.
8177                          */
8178                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8179                         return;
8180                 }
8181         }
8182         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8183                  rte_be_to_cpu_16(eth_m->type));
8184         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8185         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8186 }
8187
8188 /**
8189  * Add VLAN item to matcher and to the value.
8190  *
8191  * @param[in, out] dev_flow
8192  *   Flow descriptor.
8193  * @param[in, out] matcher
8194  *   Flow matcher.
8195  * @param[in, out] key
8196  *   Flow matcher value.
8197  * @param[in] item
8198  *   Flow pattern to translate.
8199  * @param[in] inner
8200  *   Item is inner pattern.
8201  */
8202 static void
8203 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8204                             void *matcher, void *key,
8205                             const struct rte_flow_item *item,
8206                             int inner, uint32_t group)
8207 {
8208         const struct rte_flow_item_vlan *vlan_m = item->mask;
8209         const struct rte_flow_item_vlan *vlan_v = item->spec;
8210         void *hdrs_m;
8211         void *hdrs_v;
8212         uint16_t tci_m;
8213         uint16_t tci_v;
8214
8215         if (inner) {
8216                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8217                                          inner_headers);
8218                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8219         } else {
8220                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8221                                          outer_headers);
8222                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8223                 /*
8224                  * This is workaround, masks are not supported,
8225                  * and pre-validated.
8226                  */
8227                 if (vlan_v)
8228                         dev_flow->handle->vf_vlan.tag =
8229                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8230         }
8231         /*
8232          * When VLAN item exists in flow, mark packet as tagged,
8233          * even if TCI is not specified.
8234          */
8235         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8236                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8237                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8238         }
8239         if (!vlan_v)
8240                 return;
8241         if (!vlan_m)
8242                 vlan_m = &rte_flow_item_vlan_mask;
8243         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8244         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8245         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8246         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8247         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8248         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8249         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8250         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
8251         /*
8252          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8253          * ethertype, and use ip_version field instead.
8254          */
8255         if (vlan_m->inner_type == 0xFFFF) {
8256                 switch (vlan_v->inner_type) {
8257                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8258                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8259                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8260                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8261                         return;
8262                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8263                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8264                         return;
8265                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8266                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8267                         return;
8268                 default:
8269                         break;
8270                 }
8271         }
8272         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8273                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8274                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8275                 /* Only one vlan_tag bit can be set. */
8276                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8277                 return;
8278         }
8279         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8280                  rte_be_to_cpu_16(vlan_m->inner_type));
8281         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8282                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8283 }
8284
8285 /**
8286  * Add IPV4 item to matcher and to the value.
8287  *
8288  * @param[in, out] matcher
8289  *   Flow matcher.
8290  * @param[in, out] key
8291  *   Flow matcher value.
8292  * @param[in] item
8293  *   Flow pattern to translate.
8294  * @param[in] inner
8295  *   Item is inner pattern.
8296  * @param[in] group
8297  *   The group to insert the rule.
8298  */
8299 static void
8300 flow_dv_translate_item_ipv4(void *matcher, void *key,
8301                             const struct rte_flow_item *item,
8302                             int inner, uint32_t group)
8303 {
8304         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8305         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8306         const struct rte_flow_item_ipv4 nic_mask = {
8307                 .hdr = {
8308                         .src_addr = RTE_BE32(0xffffffff),
8309                         .dst_addr = RTE_BE32(0xffffffff),
8310                         .type_of_service = 0xff,
8311                         .next_proto_id = 0xff,
8312                         .time_to_live = 0xff,
8313                 },
8314         };
8315         void *headers_m;
8316         void *headers_v;
8317         char *l24_m;
8318         char *l24_v;
8319         uint8_t tos;
8320
8321         if (inner) {
8322                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8323                                          inner_headers);
8324                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8325         } else {
8326                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8327                                          outer_headers);
8328                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8329         }
8330         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8331         if (!ipv4_v)
8332                 return;
8333         if (!ipv4_m)
8334                 ipv4_m = &nic_mask;
8335         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8336                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8337         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8338                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8339         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8340         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8341         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8342                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8343         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8344                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8345         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8346         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8347         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8348         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8349                  ipv4_m->hdr.type_of_service);
8350         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8351         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8352                  ipv4_m->hdr.type_of_service >> 2);
8353         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
8354         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8355                  ipv4_m->hdr.next_proto_id);
8356         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8357                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8358         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8359                  ipv4_m->hdr.time_to_live);
8360         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8361                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8362         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8363                  !!(ipv4_m->hdr.fragment_offset));
8364         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8365                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8366 }
8367
8368 /**
8369  * Add IPV6 item to matcher and to the value.
8370  *
8371  * @param[in, out] matcher
8372  *   Flow matcher.
8373  * @param[in, out] key
8374  *   Flow matcher value.
8375  * @param[in] item
8376  *   Flow pattern to translate.
8377  * @param[in] inner
8378  *   Item is inner pattern.
8379  * @param[in] group
8380  *   The group to insert the rule.
8381  */
8382 static void
8383 flow_dv_translate_item_ipv6(void *matcher, void *key,
8384                             const struct rte_flow_item *item,
8385                             int inner, uint32_t group)
8386 {
8387         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8388         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8389         const struct rte_flow_item_ipv6 nic_mask = {
8390                 .hdr = {
8391                         .src_addr =
8392                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8393                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8394                         .dst_addr =
8395                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8396                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8397                         .vtc_flow = RTE_BE32(0xffffffff),
8398                         .proto = 0xff,
8399                         .hop_limits = 0xff,
8400                 },
8401         };
8402         void *headers_m;
8403         void *headers_v;
8404         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8405         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8406         char *l24_m;
8407         char *l24_v;
8408         uint32_t vtc_m;
8409         uint32_t vtc_v;
8410         int i;
8411         int size;
8412
8413         if (inner) {
8414                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8415                                          inner_headers);
8416                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8417         } else {
8418                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8419                                          outer_headers);
8420                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8421         }
8422         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8423         if (!ipv6_v)
8424                 return;
8425         if (!ipv6_m)
8426                 ipv6_m = &nic_mask;
8427         size = sizeof(ipv6_m->hdr.dst_addr);
8428         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8429                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8430         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8431                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8432         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8433         for (i = 0; i < size; ++i)
8434                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8435         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8436                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8437         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8438                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8439         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8440         for (i = 0; i < size; ++i)
8441                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8442         /* TOS. */
8443         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8444         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8445         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8446         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8447         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8448         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8449         /* Label. */
8450         if (inner) {
8451                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8452                          vtc_m);
8453                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8454                          vtc_v);
8455         } else {
8456                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8457                          vtc_m);
8458                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8459                          vtc_v);
8460         }
8461         /* Protocol. */
8462         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8463                  ipv6_m->hdr.proto);
8464         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8465                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8466         /* Hop limit. */
8467         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8468                  ipv6_m->hdr.hop_limits);
8469         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8470                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8471         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8472                  !!(ipv6_m->has_frag_ext));
8473         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8474                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8475 }
8476
8477 /**
8478  * Add IPV6 fragment extension item to matcher and to the value.
8479  *
8480  * @param[in, out] matcher
8481  *   Flow matcher.
8482  * @param[in, out] key
8483  *   Flow matcher value.
8484  * @param[in] item
8485  *   Flow pattern to translate.
8486  * @param[in] inner
8487  *   Item is inner pattern.
8488  */
8489 static void
8490 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8491                                      const struct rte_flow_item *item,
8492                                      int inner)
8493 {
8494         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8495         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8496         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8497                 .hdr = {
8498                         .next_header = 0xff,
8499                         .frag_data = RTE_BE16(0xffff),
8500                 },
8501         };
8502         void *headers_m;
8503         void *headers_v;
8504
8505         if (inner) {
8506                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8507                                          inner_headers);
8508                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8509         } else {
8510                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8511                                          outer_headers);
8512                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8513         }
8514         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8515         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8516         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8517         if (!ipv6_frag_ext_v)
8518                 return;
8519         if (!ipv6_frag_ext_m)
8520                 ipv6_frag_ext_m = &nic_mask;
8521         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8522                  ipv6_frag_ext_m->hdr.next_header);
8523         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8524                  ipv6_frag_ext_v->hdr.next_header &
8525                  ipv6_frag_ext_m->hdr.next_header);
8526 }
8527
8528 /**
8529  * Add TCP item to matcher and to the value.
8530  *
8531  * @param[in, out] matcher
8532  *   Flow matcher.
8533  * @param[in, out] key
8534  *   Flow matcher value.
8535  * @param[in] item
8536  *   Flow pattern to translate.
8537  * @param[in] inner
8538  *   Item is inner pattern.
8539  */
8540 static void
8541 flow_dv_translate_item_tcp(void *matcher, void *key,
8542                            const struct rte_flow_item *item,
8543                            int inner)
8544 {
8545         const struct rte_flow_item_tcp *tcp_m = item->mask;
8546         const struct rte_flow_item_tcp *tcp_v = item->spec;
8547         void *headers_m;
8548         void *headers_v;
8549
8550         if (inner) {
8551                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8552                                          inner_headers);
8553                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8554         } else {
8555                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8556                                          outer_headers);
8557                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8558         }
8559         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8560         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8561         if (!tcp_v)
8562                 return;
8563         if (!tcp_m)
8564                 tcp_m = &rte_flow_item_tcp_mask;
8565         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8566                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8567         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8568                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8569         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8570                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8571         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8572                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8573         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8574                  tcp_m->hdr.tcp_flags);
8575         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8576                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8577 }
8578
8579 /**
8580  * Add UDP item to matcher and to the value.
8581  *
8582  * @param[in, out] matcher
8583  *   Flow matcher.
8584  * @param[in, out] key
8585  *   Flow matcher value.
8586  * @param[in] item
8587  *   Flow pattern to translate.
8588  * @param[in] inner
8589  *   Item is inner pattern.
8590  */
8591 static void
8592 flow_dv_translate_item_udp(void *matcher, void *key,
8593                            const struct rte_flow_item *item,
8594                            int inner)
8595 {
8596         const struct rte_flow_item_udp *udp_m = item->mask;
8597         const struct rte_flow_item_udp *udp_v = item->spec;
8598         void *headers_m;
8599         void *headers_v;
8600
8601         if (inner) {
8602                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8603                                          inner_headers);
8604                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8605         } else {
8606                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8607                                          outer_headers);
8608                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8609         }
8610         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8611         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8612         if (!udp_v)
8613                 return;
8614         if (!udp_m)
8615                 udp_m = &rte_flow_item_udp_mask;
8616         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8617                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8618         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8619                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8620         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8621                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8622         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8623                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8624 }
8625
/**
 * Add GRE optional Key item to matcher and to the value.
 *
 * The 32-bit GRE key is split into the PRM fields gre_key_h (upper
 * 24 bits, see the >> 8 below) and gre_key_l (lower 8 bits).
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate; spec/mask point to big-endian 32-bit keys.
 */
static void
flow_dv_translate_item_gre_key(void *matcher, void *key,
				   const struct rte_flow_item *item)
{
	const rte_be32_t *key_m = item->mask;
	const rte_be32_t *key_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);

	/* GRE K bit must be on and should already be validated */
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
	if (!key_v)
		return;
	if (!key_m)
		key_m = &gre_key_default_mask; /* Match the full 32-bit key. */
	/* Upper 24 bits of the key. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
		 rte_be_to_cpu_32(*key_m) >> 8);
	MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
		 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
	/* Lower 8 bits of the key. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
		 rte_be_to_cpu_32(*key_m) & 0xFF);
	MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
		 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
}
8664
/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Host-order view of the GRE c_rsvd0_ver word used to extract the
	 * C/K/S presence bits after byte swapping.
	 * NOTE(review): relies on the compiler's bit-field layout matching
	 * this ordering — pre-existing assumption, confirm per target ABI.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

	/* Select the inner or outer header set of matcher and key. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Always match on IP protocol == GRE, even with no spec given. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	/* Translate the C/K/S presence bits to the PRM fields. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
}
8740
/**
 * Add NVGRE item to matcher and to the value.
 *
 * NVGRE is translated as a GRE header with fixed field values (K bit set,
 * protocol TEB), then the TNI + flow_id bytes are matched through the GRE
 * key fields of the matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	const char *tni_flow_id_m;
	const char *tni_flow_id_v;
	char *gre_key_m;
	char *gre_key_v;
	int size;
	int i;

	/* For NVGRE, GRE header fields must be set with defined values. */
	const struct rte_flow_item_gre gre_spec = {
		.c_rsvd0_ver = RTE_BE16(0x2000),
		.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
	};
	const struct rte_flow_item_gre gre_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000),
		.protocol = RTE_BE16(UINT16_MAX),
	};
	const struct rte_flow_item gre_item = {
		.spec = &gre_spec,
		.mask = &gre_mask,
		.last = NULL,
	};
	flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
	if (!nvgre_v)
		return;
	if (!nvgre_m)
		nvgre_m = &rte_flow_item_nvgre_mask;
	tni_flow_id_m = (const char *)nvgre_m->tni;
	tni_flow_id_v = (const char *)nvgre_v->tni;
	/* TNI (3 bytes) and flow_id (1 byte) are contiguous in the item. */
	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
	/* gre_key_h is followed by gre_key_l; the 4 bytes are written as one. */
	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
	memcpy(gre_key_m, tni_flow_id_m, size);
	/* Key value bytes are pre-masked by the matcher mask bytes. */
	for (i = 0; i < size; ++i)
		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
}
8797
8798 /**
8799  * Add VXLAN item to matcher and to the value.
8800  *
8801  * @param[in] dev
8802  *   Pointer to the Ethernet device structure.
8803  * @param[in] attr
8804  *   Flow rule attributes.
8805  * @param[in, out] matcher
8806  *   Flow matcher.
8807  * @param[in, out] key
8808  *   Flow matcher value.
8809  * @param[in] item
8810  *   Flow pattern to translate.
8811  * @param[in] inner
8812  *   Item is inner pattern.
8813  */
8814 static void
8815 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8816                              const struct rte_flow_attr *attr,
8817                              void *matcher, void *key,
8818                              const struct rte_flow_item *item,
8819                              int inner)
8820 {
8821         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8822         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8823         void *headers_m;
8824         void *headers_v;
8825         void *misc5_m;
8826         void *misc5_v;
8827         uint32_t *tunnel_header_v;
8828         uint32_t *tunnel_header_m;
8829         uint16_t dport;
8830         struct mlx5_priv *priv = dev->data->dev_private;
8831         const struct rte_flow_item_vxlan nic_mask = {
8832                 .vni = "\xff\xff\xff",
8833                 .rsvd1 = 0xff,
8834         };
8835
8836         if (inner) {
8837                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8838                                          inner_headers);
8839                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8840         } else {
8841                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8842                                          outer_headers);
8843                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8844         }
8845         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8846                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8847         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8848                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8849                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8850         }
8851         if (!vxlan_v)
8852                 return;
8853         if (!vxlan_m) {
8854                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8855                     (attr->group && !priv->sh->misc5_cap))
8856                         vxlan_m = &rte_flow_item_vxlan_mask;
8857                 else
8858                         vxlan_m = &nic_mask;
8859         }
8860         if ((!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8861             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8862                 void *misc_m;
8863                 void *misc_v;
8864                 char *vni_m;
8865                 char *vni_v;
8866                 int size;
8867                 int i;
8868                 misc_m = MLX5_ADDR_OF(fte_match_param,
8869                                       matcher, misc_parameters);
8870                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8871                 size = sizeof(vxlan_m->vni);
8872                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8873                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8874                 memcpy(vni_m, vxlan_m->vni, size);
8875                 for (i = 0; i < size; ++i)
8876                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8877                 return;
8878         }
8879         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8880         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8881         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8882                                                    misc5_v,
8883                                                    tunnel_header_1);
8884         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8885                                                    misc5_m,
8886                                                    tunnel_header_1);
8887         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8888                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8889                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
8890         if (*tunnel_header_v)
8891                 *tunnel_header_m = vxlan_m->vni[0] |
8892                         vxlan_m->vni[1] << 8 |
8893                         vxlan_m->vni[2] << 16;
8894         else
8895                 *tunnel_header_m = 0x0;
8896         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8897         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8898                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8899 }
8900
/**
 * Add VXLAN-GPE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */

static void
flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
				 const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
	void *misc_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	char *vni_m;
	char *vni_v;
	uint16_t dport;
	int size;
	int i;
	/* Defaults applied when the mask does not request flags matching. */
	uint8_t flags_m = 0xff;
	uint8_t flags_v = 0xc;

	/* Select the inner or outer header set of matcher and key. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * NOTE(review): this translator handles VXLAN_GPE items, so the
	 * ternary below normally resolves to MLX5_UDP_PORT_VXLAN_GPE.
	 */
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/* Imply the UDP destination port unless already matched on. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!vxlan_v)
		return;
	if (!vxlan_m)
		vxlan_m = &rte_flow_item_vxlan_gpe_mask;
	/* VNI value bytes are pre-masked by the matcher mask bytes. */
	size = sizeof(vxlan_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
	memcpy(vni_m, vxlan_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
	if (vxlan_m->flags) {
		flags_m = vxlan_m->flags;
		flags_v = vxlan_v->flags;
	}
	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
		 vxlan_m->protocol);
	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
		 vxlan_v->protocol);
}
8970
8971 /**
8972  * Add Geneve item to matcher and to the value.
8973  *
8974  * @param[in, out] matcher
8975  *   Flow matcher.
8976  * @param[in, out] key
8977  *   Flow matcher value.
8978  * @param[in] item
8979  *   Flow pattern to translate.
8980  * @param[in] inner
8981  *   Item is inner pattern.
8982  */
8983
8984 static void
8985 flow_dv_translate_item_geneve(void *matcher, void *key,
8986                               const struct rte_flow_item *item, int inner)
8987 {
8988         const struct rte_flow_item_geneve *geneve_m = item->mask;
8989         const struct rte_flow_item_geneve *geneve_v = item->spec;
8990         void *headers_m;
8991         void *headers_v;
8992         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8993         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8994         uint16_t dport;
8995         uint16_t gbhdr_m;
8996         uint16_t gbhdr_v;
8997         char *vni_m;
8998         char *vni_v;
8999         size_t size, i;
9000
9001         if (inner) {
9002                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9003                                          inner_headers);
9004                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9005         } else {
9006                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9007                                          outer_headers);
9008                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9009         }
9010         dport = MLX5_UDP_PORT_GENEVE;
9011         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9012                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9013                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9014         }
9015         if (!geneve_v)
9016                 return;
9017         if (!geneve_m)
9018                 geneve_m = &rte_flow_item_geneve_mask;
9019         size = sizeof(geneve_m->vni);
9020         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
9021         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
9022         memcpy(vni_m, geneve_m->vni, size);
9023         for (i = 0; i < size; ++i)
9024                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
9025         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
9026                  rte_be_to_cpu_16(geneve_m->protocol));
9027         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
9028                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
9029         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
9030         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
9031         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
9032                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9033         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
9034                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
9035         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9036                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9037         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9038                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
9039                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
9040 }
9041
/**
 * Create Geneve TLV option resource.
 *
 * A single GENEVE TLV option object is shared per device context; the
 * resource is reference-counted under sh->geneve_tlv_opt_sl and reused
 * when the requested class/type/length match the existing object.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] item
 *   Flow pattern to translate; spec must be a GENEVE option item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */

int
flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
					     const struct rte_flow_item *item,
					     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
			sh->geneve_tlv_option_resource;
	struct mlx5_devx_obj *obj;
	const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
	int ret = 0;

	if (!geneve_opt_v)
		return -1;
	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
	if (geneve_opt_resource != NULL) {
		if (geneve_opt_resource->option_class ==
			geneve_opt_v->option_class &&
			geneve_opt_resource->option_type ==
			geneve_opt_v->option_type &&
			geneve_opt_resource->length ==
			geneve_opt_v->option_len) {
			/* We already have GENEVE TLV option obj allocated. */
			__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
					   __ATOMIC_RELAXED);
		} else {
			/* Only one option object can exist at a time. */
			ret = rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Only one GENEVE TLV option supported");
			goto exit;
		}
	} else {
		/* Create a GENEVE TLV object and resource. */
		obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
				geneve_opt_v->option_class,
				geneve_opt_v->option_type,
				geneve_opt_v->option_len);
		if (!obj) {
			ret = rte_flow_error_set(error, ENODATA,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to create GENEVE TLV Devx object");
			goto exit;
		}
		sh->geneve_tlv_option_resource =
				mlx5_malloc(MLX5_MEM_ZERO,
						sizeof(*geneve_opt_resource),
						0, SOCKET_ID_ANY);
		if (!sh->geneve_tlv_option_resource) {
			/* Roll back the Devx object on allocation failure. */
			claim_zero(mlx5_devx_cmd_destroy(obj));
			ret = rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"GENEVE TLV object memory allocation failed");
			goto exit;
		}
		geneve_opt_resource = sh->geneve_tlv_option_resource;
		geneve_opt_resource->obj = obj;
		geneve_opt_resource->option_class = geneve_opt_v->option_class;
		geneve_opt_resource->option_type = geneve_opt_v->option_type;
		geneve_opt_resource->length = geneve_opt_v->option_len;
		__atomic_store_n(&geneve_opt_resource->refcnt, 1,
				__ATOMIC_RELAXED);
	}
exit:
	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
	return ret;
}
9125
9126 /**
9127  * Add Geneve TLV option item to matcher.
9128  *
9129  * @param[in, out] dev
9130  *   Pointer to rte_eth_dev structure.
9131  * @param[in, out] matcher
9132  *   Flow matcher.
9133  * @param[in, out] key
9134  *   Flow matcher value.
9135  * @param[in] item
9136  *   Flow pattern to translate.
9137  * @param[out] error
9138  *   Pointer to error structure.
9139  */
9140 static int
9141 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9142                                   void *key, const struct rte_flow_item *item,
9143                                   struct rte_flow_error *error)
9144 {
9145         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9146         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9147         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9148         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9149         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9150                         misc_parameters_3);
9151         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9152         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9153         int ret = 0;
9154
9155         if (!geneve_opt_v)
9156                 return -1;
9157         if (!geneve_opt_m)
9158                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9159         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9160                                                            error);
9161         if (ret) {
9162                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9163                 return ret;
9164         }
9165         /*
9166          * Set the option length in GENEVE header if not requested.
9167          * The GENEVE TLV option length is expressed by the option length field
9168          * in the GENEVE header.
9169          * If the option length was not requested but the GENEVE TLV option item
9170          * is present we set the option length field implicitly.
9171          */
9172         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9173                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9174                          MLX5_GENEVE_OPTLEN_MASK);
9175                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9176                          geneve_opt_v->option_len + 1);
9177         }
9178         /* Set the data. */
9179         if (geneve_opt_v->data) {
9180                 memcpy(&opt_data_key, geneve_opt_v->data,
9181                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9182                                 sizeof(opt_data_key)));
9183                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9184                                 sizeof(opt_data_key));
9185                 memcpy(&opt_data_mask, geneve_opt_m->data,
9186                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9187                                 sizeof(opt_data_mask)));
9188                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9189                                 sizeof(opt_data_mask));
9190                 MLX5_SET(fte_match_set_misc3, misc3_m,
9191                                 geneve_tlv_option_0_data,
9192                                 rte_be_to_cpu_32(opt_data_mask));
9193                 MLX5_SET(fte_match_set_misc3, misc3_v,
9194                                 geneve_tlv_option_0_data,
9195                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9196         }
9197         return ret;
9198 }
9199
/**
 * Add MPLS item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] prev_layer
 *   The protocol layer indicated in previous item.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_mpls(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            uint64_t prev_layer,
                            int inner)
{
        const uint32_t *in_mpls_m = item->mask;
        const uint32_t *in_mpls_v = item->spec;
        uint32_t *out_mpls_m = 0;
        uint32_t *out_mpls_v = 0;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                     misc_parameters_2);
        void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
        void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

        /*
         * Pin down the encapsulating protocol implied by the previous layer:
         * the UDP destination port for MPLS-over-UDP, or the GRE protocol
         * field for MPLS-over-GRE. This is done even when the item carries
         * no spec (label wildcard).
         */
        switch (prev_layer) {
        case MLX5_FLOW_LAYER_OUTER_L4_UDP:
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                         MLX5_UDP_PORT_MPLS);
                break;
        case MLX5_FLOW_LAYER_GRE:
                /* Fall-through. */
        case MLX5_FLOW_LAYER_GRE_KEY:
                MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
                MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
                         RTE_ETHER_TYPE_MPLS);
                break;
        default:
                break;
        }
        if (!in_mpls_v)
                return;
        if (!in_mpls_m)
                in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
        /*
         * Select the matcher field that carries the MPLS label. Note the
         * asymmetry with the switch above: MLX5_FLOW_LAYER_GRE_KEY is not
         * listed here, so it falls to the default path and uses
         * outer_first_mpls (when not inner), unlike plain GRE.
         */
        switch (prev_layer) {
        case MLX5_FLOW_LAYER_OUTER_L4_UDP:
                out_mpls_m =
                        (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
                                                 outer_first_mpls_over_udp);
                out_mpls_v =
                        (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
                                                 outer_first_mpls_over_udp);
                break;
        case MLX5_FLOW_LAYER_GRE:
                out_mpls_m =
                        (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
                                                 outer_first_mpls_over_gre);
                out_mpls_v =
                        (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
                                                 outer_first_mpls_over_gre);
                break;
        default:
                /* Inner MPLS not over GRE is not supported. */
                if (!inner) {
                        out_mpls_m =
                                (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
                                                         misc2_m,
                                                         outer_first_mpls);
                        out_mpls_v =
                                (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
                                                         misc2_v,
                                                         outer_first_mpls);
                }
                break;
        }
        if (out_mpls_m && out_mpls_v) {
                /* The label is copied as a raw 32-bit word (already BE). */
                *out_mpls_m = *in_mpls_m;
                *out_mpls_v = *in_mpls_v & *in_mpls_m;
        }
}
9288
9289 /**
9290  * Add metadata register item to matcher
9291  *
9292  * @param[in, out] matcher
9293  *   Flow matcher.
9294  * @param[in, out] key
9295  *   Flow matcher value.
9296  * @param[in] reg_type
9297  *   Type of device metadata register
9298  * @param[in] value
9299  *   Register value
9300  * @param[in] mask
9301  *   Register mask
9302  */
9303 static void
9304 flow_dv_match_meta_reg(void *matcher, void *key,
9305                        enum modify_reg reg_type,
9306                        uint32_t data, uint32_t mask)
9307 {
9308         void *misc2_m =
9309                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9310         void *misc2_v =
9311                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9312         uint32_t temp;
9313
9314         data &= mask;
9315         switch (reg_type) {
9316         case REG_A:
9317                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9318                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9319                 break;
9320         case REG_B:
9321                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9322                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9323                 break;
9324         case REG_C_0:
9325                 /*
9326                  * The metadata register C0 field might be divided into
9327                  * source vport index and META item value, we should set
9328                  * this field according to specified mask, not as whole one.
9329                  */
9330                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9331                 temp |= mask;
9332                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9333                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9334                 temp &= ~mask;
9335                 temp |= data;
9336                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9337                 break;
9338         case REG_C_1:
9339                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9340                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9341                 break;
9342         case REG_C_2:
9343                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9344                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9345                 break;
9346         case REG_C_3:
9347                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9348                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9349                 break;
9350         case REG_C_4:
9351                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9352                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9353                 break;
9354         case REG_C_5:
9355                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9356                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9357                 break;
9358         case REG_C_6:
9359                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9360                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9361                 break;
9362         case REG_C_7:
9363                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9364                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9365                 break;
9366         default:
9367                 MLX5_ASSERT(false);
9368                 break;
9369         }
9370 }
9371
9372 /**
9373  * Add MARK item to matcher
9374  *
9375  * @param[in] dev
9376  *   The device to configure through.
9377  * @param[in, out] matcher
9378  *   Flow matcher.
9379  * @param[in, out] key
9380  *   Flow matcher value.
9381  * @param[in] item
9382  *   Flow pattern to translate.
9383  */
9384 static void
9385 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9386                             void *matcher, void *key,
9387                             const struct rte_flow_item *item)
9388 {
9389         struct mlx5_priv *priv = dev->data->dev_private;
9390         const struct rte_flow_item_mark *mark;
9391         uint32_t value;
9392         uint32_t mask;
9393
9394         mark = item->mask ? (const void *)item->mask :
9395                             &rte_flow_item_mark_mask;
9396         mask = mark->id & priv->sh->dv_mark_mask;
9397         mark = (const void *)item->spec;
9398         MLX5_ASSERT(mark);
9399         value = mark->id & priv->sh->dv_mark_mask & mask;
9400         if (mask) {
9401                 enum modify_reg reg;
9402
9403                 /* Get the metadata register index for the mark. */
9404                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9405                 MLX5_ASSERT(reg > 0);
9406                 if (reg == REG_C_0) {
9407                         struct mlx5_priv *priv = dev->data->dev_private;
9408                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9409                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9410
9411                         mask &= msk_c0;
9412                         mask <<= shl_c0;
9413                         value <<= shl_c0;
9414                 }
9415                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9416         }
9417 }
9418
9419 /**
9420  * Add META item to matcher
9421  *
9422  * @param[in] dev
9423  *   The devich to configure through.
9424  * @param[in, out] matcher
9425  *   Flow matcher.
9426  * @param[in, out] key
9427  *   Flow matcher value.
9428  * @param[in] attr
9429  *   Attributes of flow that includes this item.
9430  * @param[in] item
9431  *   Flow pattern to translate.
9432  */
9433 static void
9434 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9435                             void *matcher, void *key,
9436                             const struct rte_flow_attr *attr,
9437                             const struct rte_flow_item *item)
9438 {
9439         const struct rte_flow_item_meta *meta_m;
9440         const struct rte_flow_item_meta *meta_v;
9441
9442         meta_m = (const void *)item->mask;
9443         if (!meta_m)
9444                 meta_m = &rte_flow_item_meta_mask;
9445         meta_v = (const void *)item->spec;
9446         if (meta_v) {
9447                 int reg;
9448                 uint32_t value = meta_v->data;
9449                 uint32_t mask = meta_m->data;
9450
9451                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9452                 if (reg < 0)
9453                         return;
9454                 MLX5_ASSERT(reg != REG_NON);
9455                 if (reg == REG_C_0) {
9456                         struct mlx5_priv *priv = dev->data->dev_private;
9457                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9458                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9459
9460                         mask &= msk_c0;
9461                         mask <<= shl_c0;
9462                         value <<= shl_c0;
9463                 }
9464                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9465         }
9466 }
9467
9468 /**
9469  * Add vport metadata Reg C0 item to matcher
9470  *
9471  * @param[in, out] matcher
9472  *   Flow matcher.
9473  * @param[in, out] key
9474  *   Flow matcher value.
9475  * @param[in] reg
9476  *   Flow pattern to translate.
9477  */
9478 static void
9479 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9480                                   uint32_t value, uint32_t mask)
9481 {
9482         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9483 }
9484
9485 /**
9486  * Add tag item to matcher
9487  *
9488  * @param[in] dev
9489  *   The devich to configure through.
9490  * @param[in, out] matcher
9491  *   Flow matcher.
9492  * @param[in, out] key
9493  *   Flow matcher value.
9494  * @param[in] item
9495  *   Flow pattern to translate.
9496  */
9497 static void
9498 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9499                                 void *matcher, void *key,
9500                                 const struct rte_flow_item *item)
9501 {
9502         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9503         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9504         uint32_t mask, value;
9505
9506         MLX5_ASSERT(tag_v);
9507         value = tag_v->data;
9508         mask = tag_m ? tag_m->data : UINT32_MAX;
9509         if (tag_v->id == REG_C_0) {
9510                 struct mlx5_priv *priv = dev->data->dev_private;
9511                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9512                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9513
9514                 mask &= msk_c0;
9515                 mask <<= shl_c0;
9516                 value <<= shl_c0;
9517         }
9518         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9519 }
9520
9521 /**
9522  * Add TAG item to matcher
9523  *
9524  * @param[in] dev
9525  *   The devich to configure through.
9526  * @param[in, out] matcher
9527  *   Flow matcher.
9528  * @param[in, out] key
9529  *   Flow matcher value.
9530  * @param[in] item
9531  *   Flow pattern to translate.
9532  */
9533 static void
9534 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9535                            void *matcher, void *key,
9536                            const struct rte_flow_item *item)
9537 {
9538         const struct rte_flow_item_tag *tag_v = item->spec;
9539         const struct rte_flow_item_tag *tag_m = item->mask;
9540         enum modify_reg reg;
9541
9542         MLX5_ASSERT(tag_v);
9543         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9544         /* Get the metadata register index for the tag. */
9545         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9546         MLX5_ASSERT(reg > 0);
9547         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9548 }
9549
9550 /**
9551  * Add source vport match to the specified matcher.
9552  *
9553  * @param[in, out] matcher
9554  *   Flow matcher.
9555  * @param[in, out] key
9556  *   Flow matcher value.
9557  * @param[in] port
9558  *   Source vport value to match
9559  * @param[in] mask
9560  *   Mask
9561  */
9562 static void
9563 flow_dv_translate_item_source_vport(void *matcher, void *key,
9564                                     int16_t port, uint16_t mask)
9565 {
9566         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9567         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9568
9569         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9570         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9571 }
9572
/**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] attr
 *   Flow attributes.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
                               void *key, const struct rte_flow_item *item,
                               const struct rte_flow_attr *attr)
{
        const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
        const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
        struct mlx5_priv *priv;
        uint16_t mask, id;

        /* Without an item, match this device's own port with a full mask. */
        mask = pid_m ? pid_m->id : 0xffff;
        id = pid_v ? pid_v->id : dev->data->port_id;
        priv = mlx5_port_to_eswitch_info(id, item == NULL);
        if (!priv)
                return -rte_errno;
        /*
         * Translate to vport field or to metadata, depending on mode.
         * Kernel can use either misc.source_port or half of C0 metadata
         * register.
         */
        if (priv->vport_meta_mask) {
                /*
                 * Provide the hint for SW steering library
                 * to insert the flow into ingress domain and
                 * save the extra vport match.
                 */
                if (mask == 0xffff && priv->vport_id == 0xffff &&
                    priv->pf_bond < 0 && attr->transfer)
                        flow_dv_translate_item_source_vport
                                (matcher, key, priv->vport_id, mask);
                /*
                 * We should always set the vport metadata register,
                 * otherwise the SW steering library can drop
                 * the rule if wire vport metadata value is not zero,
                 * it depends on kernel configuration.
                 */
                flow_dv_translate_item_meta_vport(matcher, key,
                                                  priv->vport_meta_tag,
                                                  priv->vport_meta_mask);
        } else {
                /* Legacy mode: match directly on misc.source_port. */
                flow_dv_translate_item_source_vport(matcher, key,
                                                    priv->vport_id, mask);
        }
        return 0;
}
9635
9636 /**
9637  * Add ICMP6 item to matcher and to the value.
9638  *
9639  * @param[in, out] matcher
9640  *   Flow matcher.
9641  * @param[in, out] key
9642  *   Flow matcher value.
9643  * @param[in] item
9644  *   Flow pattern to translate.
9645  * @param[in] inner
9646  *   Item is inner pattern.
9647  */
9648 static void
9649 flow_dv_translate_item_icmp6(void *matcher, void *key,
9650                               const struct rte_flow_item *item,
9651                               int inner)
9652 {
9653         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9654         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9655         void *headers_m;
9656         void *headers_v;
9657         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9658                                      misc_parameters_3);
9659         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9660         if (inner) {
9661                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9662                                          inner_headers);
9663                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9664         } else {
9665                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9666                                          outer_headers);
9667                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9668         }
9669         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9670         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9671         if (!icmp6_v)
9672                 return;
9673         if (!icmp6_m)
9674                 icmp6_m = &rte_flow_item_icmp6_mask;
9675         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9676         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9677                  icmp6_v->type & icmp6_m->type);
9678         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9679         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9680                  icmp6_v->code & icmp6_m->code);
9681 }
9682
9683 /**
9684  * Add ICMP item to matcher and to the value.
9685  *
9686  * @param[in, out] matcher
9687  *   Flow matcher.
9688  * @param[in, out] key
9689  *   Flow matcher value.
9690  * @param[in] item
9691  *   Flow pattern to translate.
9692  * @param[in] inner
9693  *   Item is inner pattern.
9694  */
9695 static void
9696 flow_dv_translate_item_icmp(void *matcher, void *key,
9697                             const struct rte_flow_item *item,
9698                             int inner)
9699 {
9700         const struct rte_flow_item_icmp *icmp_m = item->mask;
9701         const struct rte_flow_item_icmp *icmp_v = item->spec;
9702         uint32_t icmp_header_data_m = 0;
9703         uint32_t icmp_header_data_v = 0;
9704         void *headers_m;
9705         void *headers_v;
9706         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9707                                      misc_parameters_3);
9708         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9709         if (inner) {
9710                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9711                                          inner_headers);
9712                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9713         } else {
9714                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9715                                          outer_headers);
9716                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9717         }
9718         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9719         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9720         if (!icmp_v)
9721                 return;
9722         if (!icmp_m)
9723                 icmp_m = &rte_flow_item_icmp_mask;
9724         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9725                  icmp_m->hdr.icmp_type);
9726         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9727                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9728         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9729                  icmp_m->hdr.icmp_code);
9730         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9731                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9732         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9733         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9734         if (icmp_header_data_m) {
9735                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9736                 icmp_header_data_v |=
9737                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9738                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9739                          icmp_header_data_m);
9740                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9741                          icmp_header_data_v & icmp_header_data_m);
9742         }
9743 }
9744
9745 /**
9746  * Add GTP item to matcher and to the value.
9747  *
9748  * @param[in, out] matcher
9749  *   Flow matcher.
9750  * @param[in, out] key
9751  *   Flow matcher value.
9752  * @param[in] item
9753  *   Flow pattern to translate.
9754  * @param[in] inner
9755  *   Item is inner pattern.
9756  */
9757 static void
9758 flow_dv_translate_item_gtp(void *matcher, void *key,
9759                            const struct rte_flow_item *item, int inner)
9760 {
9761         const struct rte_flow_item_gtp *gtp_m = item->mask;
9762         const struct rte_flow_item_gtp *gtp_v = item->spec;
9763         void *headers_m;
9764         void *headers_v;
9765         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9766                                      misc_parameters_3);
9767         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9768         uint16_t dport = RTE_GTPU_UDP_PORT;
9769
9770         if (inner) {
9771                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9772                                          inner_headers);
9773                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9774         } else {
9775                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9776                                          outer_headers);
9777                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9778         }
9779         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9780                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9781                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9782         }
9783         if (!gtp_v)
9784                 return;
9785         if (!gtp_m)
9786                 gtp_m = &rte_flow_item_gtp_mask;
9787         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9788                  gtp_m->v_pt_rsv_flags);
9789         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9790                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9791         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9792         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9793                  gtp_v->msg_type & gtp_m->msg_type);
9794         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9795                  rte_be_to_cpu_32(gtp_m->teid));
9796         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9797                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9798 }
9799
/**
 * Add GTP PSC item to matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 *
 * @return
 *   0 on success (never fails).
 */
static int
flow_dv_translate_item_gtp_psc(void *matcher, void *key,
                               const struct rte_flow_item *item)
{
        const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
        const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
        void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
                        misc_parameters_3);
        void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
        /*
         * Overlay for the GTP-U header second dword (sequence number,
         * N-PDU number, next extension header type).
         * NOTE(review): the field layout is converted with
         * rte_cpu_to_be_32() below; presumably correct on little-endian
         * hosts - verify behavior on big-endian targets.
         */
        union {
                uint32_t w32;
                struct {
                        uint16_t seq_num;
                        uint8_t npdu_num;
                        uint8_t next_ext_header_type;
                };
        } dw_2;
        uint8_t gtp_flags;

        /* Always set E-flag match on one, regardless of GTP item settings. */
        gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
        gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
        MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
        gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
        gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
        MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
        /*
         * Set next extension header type: mask 0xff, value 0x85
         * (presumably the PDU session container type - verify against
         * the GTP-U specification).
         */
        dw_2.seq_num = 0;
        dw_2.npdu_num = 0;
        dw_2.next_ext_header_type = 0xff;
        MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
                 rte_cpu_to_be_32(dw_2.w32));
        dw_2.seq_num = 0;
        dw_2.npdu_num = 0;
        dw_2.next_ext_header_type = 0x85;
        MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
                 rte_cpu_to_be_32(dw_2.w32));
        if (gtp_psc_v) {
                /* Overlay for the first extension header dword. */
                union {
                        uint32_t w32;
                        struct {
                                uint8_t len;
                                uint8_t type_flags;
                                uint8_t qfi;
                                uint8_t reserved;
                        };
                } dw_0;

                /* Set extension header PDU type and QoS flow identifier. */
                if (!gtp_psc_m)
                        gtp_psc_m = &rte_flow_item_gtp_psc_mask;
                dw_0.w32 = 0;
                dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
                dw_0.qfi = gtp_psc_m->qfi;
                MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
                         rte_cpu_to_be_32(dw_0.w32));
                dw_0.w32 = 0;
                dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
                                                        gtp_psc_m->pdu_type);
                dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
                MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
                         rte_cpu_to_be_32(dw_0.w32));
        }
        return 0;
}
9875
/**
 * Add eCPRI item to matcher and to the value.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
                             void *key, const struct rte_flow_item *item)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_item_ecpri *ecpri_m = item->mask;
        const struct rte_flow_item_ecpri *ecpri_v = item->spec;
        struct rte_ecpri_common_hdr common;
        void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                     misc_parameters_4);
        void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
        uint32_t *samples;
        void *dw_m;
        void *dw_v;

        if (!ecpri_v)
                return;
        if (!ecpri_m)
                ecpri_m = &rte_flow_item_ecpri_mask;
        /*
         * Maximal four DW samples are supported in a single matching now.
         * Two are used now for a eCPRI matching:
         * 1. Type: one byte, mask should be 0x00ff0000 in network order
         * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
         *    if any.
         */
        if (!ecpri_m->hdr.common.u32)
                return;
        /* Flex parser sample IDs programmed for eCPRI matching. */
        samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
        /* Need to take the whole DW as the mask to fill the entry. */
        dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
                            prog_sample_field_value_0);
        dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
                            prog_sample_field_value_0);
        /* Already big endian (network order) in the header. */
        *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
        *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
        /* Sample#0, used for matching type, offset 0. */
        MLX5_SET(fte_match_set_misc4, misc4_m,
                 prog_sample_field_id_0, samples[0]);
        /* It makes no sense to set the sample ID in the mask field. */
        MLX5_SET(fte_match_set_misc4, misc4_v,
                 prog_sample_field_id_0, samples[0]);
        /*
         * Checking if message body part needs to be matched.
         * Some wildcard rules only matching type field should be supported.
         */
        if (ecpri_m->hdr.dummy[0]) {
                common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
                switch (common.type) {
                case RTE_ECPRI_MSG_TYPE_IQ_DATA:
                case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
                case RTE_ECPRI_MSG_TYPE_DLY_MSR:
                        dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
                                            prog_sample_field_value_1);
                        dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
                                            prog_sample_field_value_1);
                        *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
                        *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
                                            ecpri_m->hdr.dummy[0];
                        /* Sample#1, to match message body, offset 4. */
                        MLX5_SET(fte_match_set_misc4, misc4_m,
                                 prog_sample_field_id_1, samples[1]);
                        MLX5_SET(fte_match_set_misc4, misc4_v,
                                 prog_sample_field_id_1, samples[1]);
                        break;
                default:
                        /* Others, do not match any sample ID. */
                        break;
                }
        }
}
9962
9963 /*
9964  * Add connection tracking status item to matcher
9965  *
9966  * @param[in] dev
 *   The device to configure through.
9968  * @param[in, out] matcher
9969  *   Flow matcher.
9970  * @param[in, out] key
9971  *   Flow matcher value.
9972  * @param[in] item
9973  *   Flow pattern to translate.
9974  */
9975 static void
9976 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
9977                               void *matcher, void *key,
9978                               const struct rte_flow_item *item)
9979 {
9980         uint32_t reg_value = 0;
9981         int reg_id;
9982         /* 8LSB 0b 11/0000/11, middle 4 bits are reserved. */
9983         uint32_t reg_mask = 0;
9984         const struct rte_flow_item_conntrack *spec = item->spec;
9985         const struct rte_flow_item_conntrack *mask = item->mask;
9986         uint32_t flags;
9987         struct rte_flow_error error;
9988
9989         if (!mask)
9990                 mask = &rte_flow_item_conntrack_mask;
9991         if (!spec || !mask->flags)
9992                 return;
9993         flags = spec->flags & mask->flags;
9994         /* The conflict should be checked in the validation. */
9995         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
9996                 reg_value |= MLX5_CT_SYNDROME_VALID;
9997         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9998                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
9999         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
10000                 reg_value |= MLX5_CT_SYNDROME_INVALID;
10001         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
10002                 reg_value |= MLX5_CT_SYNDROME_TRAP;
10003         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10004                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
10005         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
10006                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
10007                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
10008                 reg_mask |= 0xc0;
10009         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
10010                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
10011         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
10012                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
10013         /* The REG_C_x value could be saved during startup. */
10014         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
10015         if (reg_id == REG_NON)
10016                 return;
10017         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
10018                                reg_value, reg_mask);
10019 }
10020
/* All-zero reference buffer used to test whether a criteria header is unset. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Evaluate to 1 when the given header section of the criteria is all zero. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
10026
10027 /**
10028  * Calculate flow matcher enable bitmap.
10029  *
10030  * @param match_criteria
10031  *   Pointer to flow matcher criteria.
10032  *
10033  * @return
10034  *   Bitmap of enabled fields.
10035  */
10036 static uint8_t
10037 flow_dv_matcher_enable(uint32_t *match_criteria)
10038 {
10039         uint8_t match_criteria_enable;
10040
10041         match_criteria_enable =
10042                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
10043                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
10044         match_criteria_enable |=
10045                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
10046                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
10047         match_criteria_enable |=
10048                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
10049                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
10050         match_criteria_enable |=
10051                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
10052                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
10053         match_criteria_enable |=
10054                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
10055                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
10056         match_criteria_enable |=
10057                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
10058                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
10059         match_criteria_enable |=
10060                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10061                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10062         return match_criteria_enable;
10063 }
10064
10065 static void
10066 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10067 {
10068         /*
10069          * Check flow matching criteria first, subtract misc5/4 length if flow
10070          * doesn't own misc5/4 parameters. In some old rdma-core releases,
10071          * misc5/4 are not supported, and matcher creation failure is expected
10072          * w/o subtration. If misc5 is provided, misc4 must be counted in since
10073          * misc5 is right after misc4.
10074          */
10075         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10076                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10077                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10078                 if (!(match_criteria & (1 <<
10079                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10080                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10081                 }
10082         }
10083 }
10084
10085 static struct mlx5_list_entry *
10086 flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
10087                          struct mlx5_list_entry *entry, void *cb_ctx)
10088 {
10089         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10090         struct mlx5_flow_dv_matcher *ref = ctx->data;
10091         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10092                                                             typeof(*tbl), tbl);
10093         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10094                                                             sizeof(*resource),
10095                                                             0, SOCKET_ID_ANY);
10096
10097         if (!resource) {
10098                 rte_flow_error_set(ctx->error, ENOMEM,
10099                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10100                                    "cannot create matcher");
10101                 return NULL;
10102         }
10103         memcpy(resource, entry, sizeof(*resource));
10104         resource->tbl = &tbl->tbl;
10105         return &resource->entry;
10106 }
10107
/* List clone-free callback: release a matcher created by the clone cb. */
static void
flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
			     struct mlx5_list_entry *entry)
{
	mlx5_free(entry);
}
10114
/**
 * Hash-list creation callback: allocate and initialize a flow table entry.
 *
 * Called when no table matching the 64-bit key exists yet.  Creates the
 * underlying flow table object, a jump action for non-root tables, and
 * the per-table matcher list.  Dummy tables get metadata only.
 *
 * @param tool_ctx
 *   Shared device context (struct mlx5_dev_ctx_shared).
 * @param cb_ctx
 *   Callback context (struct mlx5_flow_cb_ctx): table key in data,
 *   tunnel parameters in data2, error pointer in error.
 *
 * @return
 *   Pointer to the new list entry on success, NULL otherwise (error set).
 */
struct mlx5_list_entry *
flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data2;
	struct rte_flow_error *error = ctx->error;
	union mlx5_flow_tbl_key key = { .v64 = *(uint64_t *)(ctx->data) };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = !!tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl_data->is_egress = !!key.is_egress;
	tbl_data->is_transfer = !!key.is_fdb;
	tbl_data->dummy = !!key.dummy;
	tbl_data->level = key.level;
	tbl_data->id = key.id;
	tbl = &tbl_data->tbl;
	/* Dummy tables carry only metadata; no HW objects are created. */
	if (key.dummy)
		return &tbl_data->entry;
	/* Select the steering domain matching the table direction. */
	if (key.is_fdb)
		domain = sh->fdb_domain;
	else if (key.is_egress)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* Root tables (level 0) cannot be jump targets; skip the action. */
	if (key.level != 0) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
	      key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
	      key.level, key.id);
	tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
					      flow_dv_matcher_create_cb,
					      flow_dv_matcher_match_cb,
					      flow_dv_matcher_remove_cb,
					      flow_dv_matcher_clone_cb,
					      flow_dv_matcher_clone_free_cb);
	if (!tbl_data->matchers) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create tbl matcher list");
		/*
		 * jump.action is NULL for level-0 tables (entry was
		 * zero-allocated); presumably the destroy helper tolerates
		 * NULL — TODO confirm mlx5_flow_os_destroy_flow_action().
		 */
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	return &tbl_data->entry;
}
10199
10200 int
10201 flow_dv_tbl_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10202                      void *cb_ctx)
10203 {
10204         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10205         struct mlx5_flow_tbl_data_entry *tbl_data =
10206                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10207         union mlx5_flow_tbl_key key = { .v64 =  *(uint64_t *)(ctx->data) };
10208
10209         return tbl_data->level != key.level ||
10210                tbl_data->id != key.id ||
10211                tbl_data->dummy != key.dummy ||
10212                tbl_data->is_transfer != !!key.is_fdb ||
10213                tbl_data->is_egress != !!key.is_egress;
10214 }
10215
10216 struct mlx5_list_entry *
10217 flow_dv_tbl_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10218                       void *cb_ctx)
10219 {
10220         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10221         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10222         struct mlx5_flow_tbl_data_entry *tbl_data;
10223         struct rte_flow_error *error = ctx->error;
10224         uint32_t idx = 0;
10225
10226         tbl_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10227         if (!tbl_data) {
10228                 rte_flow_error_set(error, ENOMEM,
10229                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10230                                    NULL,
10231                                    "cannot allocate flow table data entry");
10232                 return NULL;
10233         }
10234         memcpy(tbl_data, oentry, sizeof(*tbl_data));
10235         tbl_data->idx = idx;
10236         return &tbl_data->entry;
10237 }
10238
10239 void
10240 flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10241 {
10242         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10243         struct mlx5_flow_tbl_data_entry *tbl_data =
10244                     container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10245
10246         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10247 }
10248
10249 /**
10250  * Get a flow table.
10251  *
10252  * @param[in, out] dev
10253  *   Pointer to rte_eth_dev structure.
10254  * @param[in] table_level
10255  *   Table level to use.
10256  * @param[in] egress
10257  *   Direction of the table.
10258  * @param[in] transfer
10259  *   E-Switch or NIC flow.
10260  * @param[in] dummy
10261  *   Dummy entry for dv API.
10262  * @param[in] table_id
10263  *   Table id to use.
10264  * @param[out] error
10265  *   pointer to error structure.
10266  *
10267  * @return
10268  *   Returns tables resource based on the index, NULL in case of failed.
10269  */
10270 struct mlx5_flow_tbl_resource *
10271 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10272                          uint32_t table_level, uint8_t egress,
10273                          uint8_t transfer,
10274                          bool external,
10275                          const struct mlx5_flow_tunnel *tunnel,
10276                          uint32_t group_id, uint8_t dummy,
10277                          uint32_t table_id,
10278                          struct rte_flow_error *error)
10279 {
10280         struct mlx5_priv *priv = dev->data->dev_private;
10281         union mlx5_flow_tbl_key table_key = {
10282                 {
10283                         .level = table_level,
10284                         .id = table_id,
10285                         .reserved = 0,
10286                         .dummy = !!dummy,
10287                         .is_fdb = !!transfer,
10288                         .is_egress = !!egress,
10289                 }
10290         };
10291         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10292                 .tunnel = tunnel,
10293                 .group_id = group_id,
10294                 .external = external,
10295         };
10296         struct mlx5_flow_cb_ctx ctx = {
10297                 .dev = dev,
10298                 .error = error,
10299                 .data = &table_key.v64,
10300                 .data2 = &tt_prm,
10301         };
10302         struct mlx5_list_entry *entry;
10303         struct mlx5_flow_tbl_data_entry *tbl_data;
10304
10305         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10306         if (!entry) {
10307                 rte_flow_error_set(error, ENOMEM,
10308                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10309                                    "cannot get table");
10310                 return NULL;
10311         }
10312         DRV_LOG(DEBUG, "table_level %u table_id %u "
10313                 "tunnel %u group %u registered.",
10314                 table_level, table_id,
10315                 tunnel ? tunnel->tunnel_id : 0, group_id);
10316         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10317         return &tbl_data->tbl;
10318 }
10319
/**
 * Hash-list removal callback: destroy a flow table entry and its objects.
 *
 * Releases the jump action and table object when present, unregisters the
 * tunnel-offload group reference for externally created tables, destroys
 * the matcher list, and returns the entry to its ipool.
 *
 * @param tool_ctx
 *   Shared device context (struct mlx5_dev_ctx_shared).
 * @param entry
 *   List entry embedded in the table data entry being removed.
 */
void
flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		    container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_list_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		uint32_t table_level = tbl_data->level;
		struct mlx5_flow_cb_ctx ctx = {
			.data = (void *)&tunnel_key.val,
		};

		/* Tables without a tunnel use the hub-wide group hash. */
		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, &ctx);
		if (he)
			mlx5_hlist_unregister(tunnel_grp_hash, he);
		DRV_LOG(DEBUG,
			"table_level %u id %u tunnel %u group %u released.",
			table_level,
			tbl_data->id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	/*
	 * NOTE(review): matchers is NULL for dummy tables (create_cb
	 * returns before mlx5_list_create) — presumably mlx5_list_destroy()
	 * tolerates NULL; confirm.
	 */
	mlx5_list_destroy(tbl_data->matchers);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
10363
10364 /**
10365  * Release a flow table.
10366  *
10367  * @param[in] sh
10368  *   Pointer to device shared structure.
10369  * @param[in] tbl
10370  *   Table resource to be released.
10371  *
10372  * @return
10373  *   Returns 0 if table was released, else return 1;
10374  */
10375 static int
10376 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10377                              struct mlx5_flow_tbl_resource *tbl)
10378 {
10379         struct mlx5_flow_tbl_data_entry *tbl_data =
10380                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10381
10382         if (!tbl)
10383                 return 0;
10384         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10385 }
10386
10387 int
10388 flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
10389                          struct mlx5_list_entry *entry, void *cb_ctx)
10390 {
10391         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10392         struct mlx5_flow_dv_matcher *ref = ctx->data;
10393         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10394                                                         entry);
10395
10396         return cur->crc != ref->crc ||
10397                cur->priority != ref->priority ||
10398                memcmp((const void *)cur->mask.buf,
10399                       (const void *)ref->mask.buf, ref->mask.size);
10400 }
10401
/**
 * List creation callback: allocate a matcher and create its DV object.
 *
 * @param tool_ctx
 *   Shared device context (struct mlx5_dev_ctx_shared).
 * @param cb_ctx
 *   Callback context (struct mlx5_flow_cb_ctx) with the reference matcher
 *   in data and the error pointer in error.
 *
 * @return
 *   Pointer to the new matcher list entry on success, NULL otherwise.
 */
struct mlx5_list_entry *
flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = tool_ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_matcher *ref = ctx->data;
	struct mlx5_flow_dv_matcher *resource;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&ref->mask,
	};
	struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
							    typeof(*tbl), tbl);
	int ret;

	resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
			       SOCKET_ID_ANY);
	if (!resource) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	*resource = *ref;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(resource->mask.buf);
	/*
	 * NOTE(review): the buffer size is adjusted on the caller's
	 * reference (ref->mask.size) rather than on the local copy;
	 * presumably intentional since dv_attr.match_mask also points at
	 * ref->mask — confirm against the matcher registration flow.
	 */
	__flow_dv_adjust_buf_size(&ref->mask.size,
				  dv_attr.match_criteria_enable);
	dv_attr.priority = ref->priority;
	if (tbl->is_egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
					       &resource->matcher_object);
	if (ret) {
		mlx5_free(resource);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	return &resource->entry;
}
10444
10445 /**
10446  * Register the flow matcher.
10447  *
10448  * @param[in, out] dev
10449  *   Pointer to rte_eth_dev structure.
10450  * @param[in, out] matcher
10451  *   Pointer to flow matcher.
10452  * @param[in, out] key
10453  *   Pointer to flow table key.
10454  * @parm[in, out] dev_flow
10455  *   Pointer to the dev_flow.
10456  * @param[out] error
10457  *   pointer to error structure.
10458  *
10459  * @return
10460  *   0 on success otherwise -errno and errno is set.
10461  */
10462 static int
10463 flow_dv_matcher_register(struct rte_eth_dev *dev,
10464                          struct mlx5_flow_dv_matcher *ref,
10465                          union mlx5_flow_tbl_key *key,
10466                          struct mlx5_flow *dev_flow,
10467                          const struct mlx5_flow_tunnel *tunnel,
10468                          uint32_t group_id,
10469                          struct rte_flow_error *error)
10470 {
10471         struct mlx5_list_entry *entry;
10472         struct mlx5_flow_dv_matcher *resource;
10473         struct mlx5_flow_tbl_resource *tbl;
10474         struct mlx5_flow_tbl_data_entry *tbl_data;
10475         struct mlx5_flow_cb_ctx ctx = {
10476                 .error = error,
10477                 .data = ref,
10478         };
10479         /**
10480          * tunnel offload API requires this registration for cases when
10481          * tunnel match rule was inserted before tunnel set rule.
10482          */
10483         tbl = flow_dv_tbl_resource_get(dev, key->level,
10484                                        key->is_egress, key->is_fdb,
10485                                        dev_flow->external, tunnel,
10486                                        group_id, 0, key->id, error);
10487         if (!tbl)
10488                 return -rte_errno;      /* No need to refill the error info */
10489         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10490         ref->tbl = tbl;
10491         entry = mlx5_list_register(tbl_data->matchers, &ctx);
10492         if (!entry) {
10493                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10494                 return rte_flow_error_set(error, ENOMEM,
10495                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10496                                           "cannot allocate ref memory");
10497         }
10498         resource = container_of(entry, typeof(*resource), entry);
10499         dev_flow->handle->dvh.matcher = resource;
10500         return 0;
10501 }
10502
10503 struct mlx5_list_entry *
10504 flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx)
10505 {
10506         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10507         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10508         struct mlx5_flow_dv_tag_resource *entry;
10509         uint32_t idx = 0;
10510         int ret;
10511
10512         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10513         if (!entry) {
10514                 rte_flow_error_set(ctx->error, ENOMEM,
10515                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10516                                    "cannot allocate resource memory");
10517                 return NULL;
10518         }
10519         entry->idx = idx;
10520         entry->tag_id = *(uint32_t *)(ctx->data);
10521         ret = mlx5_flow_os_create_flow_action_tag(entry->tag_id,
10522                                                   &entry->action);
10523         if (ret) {
10524                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10525                 rte_flow_error_set(ctx->error, ENOMEM,
10526                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10527                                    NULL, "cannot create action");
10528                 return NULL;
10529         }
10530         return &entry->entry;
10531 }
10532
10533 int
10534 flow_dv_tag_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
10535                      void *cb_ctx)
10536 {
10537         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10538         struct mlx5_flow_dv_tag_resource *tag =
10539                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10540
10541         return *(uint32_t *)(ctx->data) != tag->tag_id;
10542 }
10543
10544 struct mlx5_list_entry *
10545 flow_dv_tag_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
10546                      void *cb_ctx)
10547 {
10548         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10549         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10550         struct mlx5_flow_dv_tag_resource *entry;
10551         uint32_t idx = 0;
10552
10553         entry = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10554         if (!entry) {
10555                 rte_flow_error_set(ctx->error, ENOMEM,
10556                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10557                                    "cannot allocate tag resource memory");
10558                 return NULL;
10559         }
10560         memcpy(entry, oentry, sizeof(*entry));
10561         entry->idx = idx;
10562         return &entry->entry;
10563 }
10564
10565 void
10566 flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10567 {
10568         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10569         struct mlx5_flow_dv_tag_resource *tag =
10570                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10571
10572         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10573 }
10574
10575 /**
10576  * Find existing tag resource or create and register a new one.
10577  *
10578  * @param dev[in, out]
10579  *   Pointer to rte_eth_dev structure.
10580  * @param[in, out] tag_be24
10581  *   Tag value in big endian then R-shift 8.
10582  * @parm[in, out] dev_flow
10583  *   Pointer to the dev_flow.
10584  * @param[out] error
10585  *   pointer to error structure.
10586  *
10587  * @return
10588  *   0 on success otherwise -errno and errno is set.
10589  */
10590 static int
10591 flow_dv_tag_resource_register
10592                         (struct rte_eth_dev *dev,
10593                          uint32_t tag_be24,
10594                          struct mlx5_flow *dev_flow,
10595                          struct rte_flow_error *error)
10596 {
10597         struct mlx5_priv *priv = dev->data->dev_private;
10598         struct mlx5_flow_dv_tag_resource *resource;
10599         struct mlx5_list_entry *entry;
10600         struct mlx5_flow_cb_ctx ctx = {
10601                                         .error = error,
10602                                         .data = &tag_be24,
10603                                         };
10604
10605         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, &ctx);
10606         if (entry) {
10607                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10608                                         entry);
10609                 dev_flow->handle->dvh.rix_tag = resource->idx;
10610                 dev_flow->dv.tag_resource = resource;
10611                 return 0;
10612         }
10613         return -rte_errno;
10614 }
10615
10616 void
10617 flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
10618 {
10619         struct mlx5_dev_ctx_shared *sh = tool_ctx;
10620         struct mlx5_flow_dv_tag_resource *tag =
10621                    container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10622
10623         MLX5_ASSERT(tag && sh && tag->action);
10624         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10625         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10626         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10627 }
10628
10629 /**
10630  * Release the tag.
10631  *
10632  * @param dev
10633  *   Pointer to Ethernet device.
10634  * @param tag_idx
10635  *   Tag index.
10636  *
10637  * @return
10638  *   1 while a reference on it exists, 0 when freed.
10639  */
10640 static int
10641 flow_dv_tag_release(struct rte_eth_dev *dev,
10642                     uint32_t tag_idx)
10643 {
10644         struct mlx5_priv *priv = dev->data->dev_private;
10645         struct mlx5_flow_dv_tag_resource *tag;
10646
10647         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10648         if (!tag)
10649                 return 0;
10650         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10651                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10652         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10653 }
10654
10655 /**
10656  * Translate port ID action to vport.
10657  *
10658  * @param[in] dev
10659  *   Pointer to rte_eth_dev structure.
10660  * @param[in] action
10661  *   Pointer to the port ID action.
10662  * @param[out] dst_port_id
10663  *   The target port ID.
10664  * @param[out] error
10665  *   Pointer to the error structure.
10666  *
10667  * @return
10668  *   0 on success, a negative errno value otherwise and rte_errno is set.
10669  */
10670 static int
10671 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10672                                  const struct rte_flow_action *action,
10673                                  uint32_t *dst_port_id,
10674                                  struct rte_flow_error *error)
10675 {
10676         uint32_t port;
10677         struct mlx5_priv *priv;
10678         const struct rte_flow_action_port_id *conf =
10679                         (const struct rte_flow_action_port_id *)action->conf;
10680
10681         port = conf->original ? dev->data->port_id : conf->id;
10682         priv = mlx5_port_to_eswitch_info(port, false);
10683         if (!priv)
10684                 return rte_flow_error_set(error, -rte_errno,
10685                                           RTE_FLOW_ERROR_TYPE_ACTION,
10686                                           NULL,
10687                                           "No eswitch info was found for port");
10688 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10689         /*
10690          * This parameter is transferred to
10691          * mlx5dv_dr_action_create_dest_ib_port().
10692          */
10693         *dst_port_id = priv->dev_port;
10694 #else
10695         /*
10696          * Legacy mode, no LAG configurations is supported.
10697          * This parameter is transferred to
10698          * mlx5dv_dr_action_create_dest_vport().
10699          */
10700         *dst_port_id = priv->vport_id;
10701 #endif
10702         return 0;
10703 }
10704
10705 /**
10706  * Create a counter with aging configuration.
10707  *
10708  * @param[in] dev
10709  *   Pointer to rte_eth_dev structure.
10710  * @param[in] dev_flow
10711  *   Pointer to the mlx5_flow.
10712  * @param[out] count
10713  *   Pointer to the counter action configuration.
10714  * @param[in] age
10715  *   Pointer to the aging action configuration.
10716  *
10717  * @return
10718  *   Index to flow counter on success, 0 otherwise.
10719  */
10720 static uint32_t
10721 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10722                                 struct mlx5_flow *dev_flow,
10723                                 const struct rte_flow_action_count *count,
10724                                 const struct rte_flow_action_age *age)
10725 {
10726         uint32_t counter;
10727         struct mlx5_age_param *age_param;
10728
10729         if (count && count->shared)
10730                 counter = flow_dv_counter_get_shared(dev, count->id);
10731         else
10732                 counter = flow_dv_counter_alloc(dev, !!age);
10733         if (!counter || age == NULL)
10734                 return counter;
10735         age_param = flow_dv_counter_idx_get_age(dev, counter);
10736         age_param->context = age->context ? age->context :
10737                 (void *)(uintptr_t)(dev_flow->flow_idx);
10738         age_param->timeout = age->timeout;
10739         age_param->port_id = dev->data->port_id;
10740         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10741         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10742         return counter;
10743 }
10744
10745 /**
10746  * Add Tx queue matcher
10747  *
10748  * @param[in] dev
10749  *   Pointer to the dev struct.
10750  * @param[in, out] matcher
10751  *   Flow matcher.
10752  * @param[in, out] key
10753  *   Flow matcher value.
10754  * @param[in] item
10755  *   Flow pattern to translate.
10756  * @param[in] inner
10757  *   Item is inner pattern.
10758  */
10759 static void
10760 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10761                                 void *matcher, void *key,
10762                                 const struct rte_flow_item *item)
10763 {
10764         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10765         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10766         void *misc_m =
10767                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10768         void *misc_v =
10769                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10770         struct mlx5_txq_ctrl *txq;
10771         uint32_t queue;
10772
10773
10774         queue_m = (const void *)item->mask;
10775         if (!queue_m)
10776                 return;
10777         queue_v = (const void *)item->spec;
10778         if (!queue_v)
10779                 return;
10780         txq = mlx5_txq_get(dev, queue_v->queue);
10781         if (!txq)
10782                 return;
10783         queue = txq->obj->sq->id;
10784         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10785         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10786                  queue & queue_m->queue);
10787         mlx5_txq_release(dev, queue_v->queue);
10788 }
10789
10790 /**
10791  * Set the hash fields according to the @p flow information.
10792  *
10793  * @param[in] dev_flow
10794  *   Pointer to the mlx5_flow.
10795  * @param[in] rss_desc
10796  *   Pointer to the mlx5_flow_rss_desc.
10797  */
10798 static void
10799 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10800                        struct mlx5_flow_rss_desc *rss_desc)
10801 {
10802         uint64_t items = dev_flow->handle->layers;
10803         int rss_inner = 0;
10804         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10805
10806         dev_flow->hash_fields = 0;
10807 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10808         if (rss_desc->level >= 2) {
10809                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10810                 rss_inner = 1;
10811         }
10812 #endif
10813         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10814             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10815                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10816                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10817                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10818                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10819                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10820                         else
10821                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10822                 }
10823         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10824                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10825                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10826                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10827                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10828                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10829                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10830                         else
10831                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10832                 }
10833         }
10834         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10835             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10836                 if (rss_types & ETH_RSS_UDP) {
10837                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10838                                 dev_flow->hash_fields |=
10839                                                 IBV_RX_HASH_SRC_PORT_UDP;
10840                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10841                                 dev_flow->hash_fields |=
10842                                                 IBV_RX_HASH_DST_PORT_UDP;
10843                         else
10844                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10845                 }
10846         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10847                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10848                 if (rss_types & ETH_RSS_TCP) {
10849                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10850                                 dev_flow->hash_fields |=
10851                                                 IBV_RX_HASH_SRC_PORT_TCP;
10852                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10853                                 dev_flow->hash_fields |=
10854                                                 IBV_RX_HASH_DST_PORT_TCP;
10855                         else
10856                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10857                 }
10858         }
10859 }
10860
10861 /**
10862  * Prepare an Rx Hash queue.
10863  *
10864  * @param dev
10865  *   Pointer to Ethernet device.
10866  * @param[in] dev_flow
10867  *   Pointer to the mlx5_flow.
10868  * @param[in] rss_desc
10869  *   Pointer to the mlx5_flow_rss_desc.
10870  * @param[out] hrxq_idx
10871  *   Hash Rx queue index.
10872  *
10873  * @return
10874  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10875  */
10876 static struct mlx5_hrxq *
10877 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10878                      struct mlx5_flow *dev_flow,
10879                      struct mlx5_flow_rss_desc *rss_desc,
10880                      uint32_t *hrxq_idx)
10881 {
10882         struct mlx5_priv *priv = dev->data->dev_private;
10883         struct mlx5_flow_handle *dh = dev_flow->handle;
10884         struct mlx5_hrxq *hrxq;
10885
10886         MLX5_ASSERT(rss_desc->queue_num);
10887         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10888         rss_desc->hash_fields = dev_flow->hash_fields;
10889         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10890         rss_desc->shared_rss = 0;
10891         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10892         if (!*hrxq_idx)
10893                 return NULL;
10894         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10895                               *hrxq_idx);
10896         return hrxq;
10897 }
10898
10899 /**
10900  * Release sample sub action resource.
10901  *
10902  * @param[in, out] dev
10903  *   Pointer to rte_eth_dev structure.
10904  * @param[in] act_res
10905  *   Pointer to sample sub action resource.
10906  */
10907 static void
10908 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10909                                    struct mlx5_flow_sub_actions_idx *act_res)
10910 {
10911         if (act_res->rix_hrxq) {
10912                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10913                 act_res->rix_hrxq = 0;
10914         }
10915         if (act_res->rix_encap_decap) {
10916                 flow_dv_encap_decap_resource_release(dev,
10917                                                      act_res->rix_encap_decap);
10918                 act_res->rix_encap_decap = 0;
10919         }
10920         if (act_res->rix_port_id_action) {
10921                 flow_dv_port_id_action_resource_release(dev,
10922                                                 act_res->rix_port_id_action);
10923                 act_res->rix_port_id_action = 0;
10924         }
10925         if (act_res->rix_tag) {
10926                 flow_dv_tag_release(dev, act_res->rix_tag);
10927                 act_res->rix_tag = 0;
10928         }
10929         if (act_res->rix_jump) {
10930                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10931                 act_res->rix_jump = 0;
10932         }
10933 }
10934
10935 int
10936 flow_dv_sample_match_cb(void *tool_ctx __rte_unused,
10937                         struct mlx5_list_entry *entry, void *cb_ctx)
10938 {
10939         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10940         struct rte_eth_dev *dev = ctx->dev;
10941         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
10942         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
10943                                                               typeof(*resource),
10944                                                               entry);
10945
10946         if (ctx_resource->ratio == resource->ratio &&
10947             ctx_resource->ft_type == resource->ft_type &&
10948             ctx_resource->ft_id == resource->ft_id &&
10949             ctx_resource->set_action == resource->set_action &&
10950             !memcmp((void *)&ctx_resource->sample_act,
10951                     (void *)&resource->sample_act,
10952                     sizeof(struct mlx5_flow_sub_actions_list))) {
10953                 /*
10954                  * Existing sample action should release the prepared
10955                  * sub-actions reference counter.
10956                  */
10957                 flow_dv_sample_sub_actions_release(dev,
10958                                                    &ctx_resource->sample_idx);
10959                 return 0;
10960         }
10961         return 1;
10962 }
10963
/*
 * List callback: allocate and initialize a new sample resource.
 *
 * Creates the normal-path flow table one level below the sample table,
 * appends the default miss action for FDB, and creates the DR sampler
 * action. On failure every acquired resource is rolled back.
 * Returns the embedded list entry, or NULL with @p ctx->error set.
 */
struct mlx5_list_entry *
flow_dv_sample_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct rte_eth_dev *dev = ctx->dev;
        struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
        void **sample_dv_actions = ctx_resource->sub_actions;
        struct mlx5_flow_dv_sample_resource *resource;
        struct mlx5dv_dr_flow_sampler_attr sampler_attr;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_flow_tbl_resource *tbl;
        uint32_t idx = 0;
        const uint32_t next_ft_step = 1;
        uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
        uint8_t is_egress = 0;
        uint8_t is_transfer = 0;
        struct rte_flow_error *error = ctx->error;

        /* Register new sample resource. */
        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
        if (!resource) {
                rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot allocate resource memory");
                return NULL;
        }
        *resource = *ctx_resource;
        /* Create normal path table level */
        if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                is_transfer = 1;
        else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
                is_egress = 1;
        tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
                                        is_egress, is_transfer,
                                        true, NULL, 0, 0, 0, error);
        if (!tbl) {
                rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "fail to create normal path table "
                                          "for sample");
                goto error;
        }
        resource->normal_path_tbl = tbl;
        /* FDB sampling needs the shared default miss action appended. */
        if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
                if (!sh->default_miss_action) {
                        rte_flow_error_set(error, ENOMEM,
                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                                NULL,
                                                "default miss action was not "
                                                "created");
                        goto error;
                }
                sample_dv_actions[ctx_resource->sample_act.actions_num++] =
                                                sh->default_miss_action;
        }
        /* Create a DR sample action */
        sampler_attr.sample_ratio = resource->ratio;
        sampler_attr.default_next_table = tbl->obj;
        sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
        sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
                                                        &sample_dv_actions[0];
        sampler_attr.action = resource->set_action;
        if (mlx5_os_flow_dr_create_flow_action_sampler
                        (&sampler_attr, &resource->verbs_action)) {
                rte_flow_error_set(error, ENOMEM,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        NULL, "cannot create sample action");
                goto error;
        }
        resource->idx = idx;
        resource->dev = dev;
        return &resource->entry;
error:
        /*
         * NOTE(review): the prepared sub-action references are released
         * here only for non-FDB tables — presumably the caller owns the
         * rollback in the FDB case; confirm against the callers.
         */
        if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
                flow_dv_sample_sub_actions_release(dev,
                                                   &resource->sample_idx);
        if (resource->normal_path_tbl)
                flow_dv_tbl_resource_release(MLX5_SH(dev),
                                resource->normal_path_tbl);
        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
        return NULL;

}
11050
11051 struct mlx5_list_entry *
11052 flow_dv_sample_clone_cb(void *tool_ctx __rte_unused,
11053                          struct mlx5_list_entry *entry __rte_unused,
11054                          void *cb_ctx)
11055 {
11056         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11057         struct rte_eth_dev *dev = ctx->dev;
11058         struct mlx5_flow_dv_sample_resource *resource;
11059         struct mlx5_priv *priv = dev->data->dev_private;
11060         struct mlx5_dev_ctx_shared *sh = priv->sh;
11061         uint32_t idx = 0;
11062
11063         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
11064         if (!resource) {
11065                 rte_flow_error_set(ctx->error, ENOMEM,
11066                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11067                                           NULL,
11068                                           "cannot allocate resource memory");
11069                 return NULL;
11070         }
11071         memcpy(resource, entry, sizeof(*resource));
11072         resource->idx = idx;
11073         resource->dev = dev;
11074         return &resource->entry;
11075 }
11076
11077 void
11078 flow_dv_sample_clone_free_cb(void *tool_ctx __rte_unused,
11079                              struct mlx5_list_entry *entry)
11080 {
11081         struct mlx5_flow_dv_sample_resource *resource =
11082                                   container_of(entry, typeof(*resource), entry);
11083         struct rte_eth_dev *dev = resource->dev;
11084         struct mlx5_priv *priv = dev->data->dev_private;
11085
11086         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
11087 }
11088
11089 /**
11090  * Find existing sample resource or create and register a new one.
11091  *
11092  * @param[in, out] dev
11093  *   Pointer to rte_eth_dev structure.
11094  * @param[in] ref
11095  *   Pointer to sample resource reference.
11096  * @parm[in, out] dev_flow
11097  *   Pointer to the dev_flow.
11098  * @param[out] error
11099  *   pointer to error structure.
11100  *
11101  * @return
11102  *   0 on success otherwise -errno and errno is set.
11103  */
11104 static int
11105 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
11106                          struct mlx5_flow_dv_sample_resource *ref,
11107                          struct mlx5_flow *dev_flow,
11108                          struct rte_flow_error *error)
11109 {
11110         struct mlx5_flow_dv_sample_resource *resource;
11111         struct mlx5_list_entry *entry;
11112         struct mlx5_priv *priv = dev->data->dev_private;
11113         struct mlx5_flow_cb_ctx ctx = {
11114                 .dev = dev,
11115                 .error = error,
11116                 .data = ref,
11117         };
11118
11119         entry = mlx5_list_register(priv->sh->sample_action_list, &ctx);
11120         if (!entry)
11121                 return -rte_errno;
11122         resource = container_of(entry, typeof(*resource), entry);
11123         dev_flow->handle->dvh.rix_sample = resource->idx;
11124         dev_flow->dv.sample_res = resource;
11125         return 0;
11126 }
11127
11128 int
11129 flow_dv_dest_array_match_cb(void *tool_ctx __rte_unused,
11130                             struct mlx5_list_entry *entry, void *cb_ctx)
11131 {
11132         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11133         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11134         struct rte_eth_dev *dev = ctx->dev;
11135         struct mlx5_flow_dv_dest_array_resource *resource =
11136                                   container_of(entry, typeof(*resource), entry);
11137         uint32_t idx = 0;
11138
11139         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11140             ctx_resource->ft_type == resource->ft_type &&
11141             !memcmp((void *)resource->sample_act,
11142                     (void *)ctx_resource->sample_act,
11143                    (ctx_resource->num_of_dest *
11144                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11145                 /*
11146                  * Existing sample action should release the prepared
11147                  * sub-actions reference counter.
11148                  */
11149                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11150                         flow_dv_sample_sub_actions_release(dev,
11151                                         &ctx_resource->sample_idx[idx]);
11152                 return 0;
11153         }
11154         return 1;
11155 }
11156
/*
 * List callback: allocate and initialize a new destination array resource.
 *
 * Builds one mlx5dv_dr_action_dest_attr per destination from the prepared
 * sub-actions and creates the DR destination array action in the matching
 * domain. On failure every acquired resource is rolled back.
 * Returns the embedded list entry, or NULL with @p ctx->error set.
 */
struct mlx5_list_entry *
flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
{
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct rte_eth_dev *dev = ctx->dev;
        struct mlx5_flow_dv_dest_array_resource *resource;
        struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
        struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
        struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_flow_sub_actions_list *sample_act;
        struct mlx5dv_dr_domain *domain;
        uint32_t idx = 0, res_idx = 0;
        struct rte_flow_error *error = ctx->error;
        uint64_t action_flags;
        int ret;

        /* Register new destination array resource. */
        resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
                                            &res_idx);
        if (!resource) {
                rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot allocate resource memory");
                return NULL;
        }
        *resource = *ctx_resource;
        /* Pick the DR domain matching the flow table type. */
        if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        /* Build one destination attribute per sub-action. */
        for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
                dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
                                 mlx5_malloc(MLX5_MEM_ZERO,
                                 sizeof(struct mlx5dv_dr_action_dest_attr),
                                 0, SOCKET_ID_ANY);
                if (!dest_attr[idx]) {
                        rte_flow_error_set(error, ENOMEM,
                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                           NULL,
                                           "cannot allocate resource memory");
                        goto error;
                }
                dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
                sample_act = &ctx_resource->sample_act[idx];
                action_flags = sample_act->action_flags;
                switch (action_flags) {
                case MLX5_FLOW_ACTION_QUEUE:
                        dest_attr[idx]->dest = sample_act->dr_queue_action;
                        break;
                /* PORT_ID combined with ENCAP uses the reformat dest type. */
                case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
                        dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
                        dest_attr[idx]->dest_reformat = &dest_reformat[idx];
                        dest_attr[idx]->dest_reformat->reformat =
                                        sample_act->dr_encap_action;
                        dest_attr[idx]->dest_reformat->dest =
                                        sample_act->dr_port_id_action;
                        break;
                case MLX5_FLOW_ACTION_PORT_ID:
                        dest_attr[idx]->dest = sample_act->dr_port_id_action;
                        break;
                case MLX5_FLOW_ACTION_JUMP:
                        dest_attr[idx]->dest = sample_act->dr_jump_action;
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           NULL,
                                           "unsupported actions type");
                        goto error;
                }
        }
        /* create a dest array action */
        ret = mlx5_os_flow_dr_create_flow_action_dest_array
                                                (domain,
                                                 resource->num_of_dest,
                                                 dest_attr,
                                                 &resource->action);
        if (ret) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "cannot create destination array action");
                goto error;
        }
        resource->idx = res_idx;
        resource->dev = dev;
        /* The attributes are only needed for action creation; free them. */
        for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
                mlx5_free(dest_attr[idx]);
        return &resource->entry;
error:
        for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
                flow_dv_sample_sub_actions_release(dev,
                                                   &resource->sample_idx[idx]);
                if (dest_attr[idx])
                        mlx5_free(dest_attr[idx]);
        }
        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
        return NULL;
}
11261
11262 struct mlx5_list_entry *
11263 flow_dv_dest_array_clone_cb(void *tool_ctx __rte_unused,
11264                             struct mlx5_list_entry *entry __rte_unused,
11265                             void *cb_ctx)
11266 {
11267         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11268         struct rte_eth_dev *dev = ctx->dev;
11269         struct mlx5_flow_dv_dest_array_resource *resource;
11270         struct mlx5_priv *priv = dev->data->dev_private;
11271         struct mlx5_dev_ctx_shared *sh = priv->sh;
11272         uint32_t res_idx = 0;
11273         struct rte_flow_error *error = ctx->error;
11274
11275         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11276                                       &res_idx);
11277         if (!resource) {
11278                 rte_flow_error_set(error, ENOMEM,
11279                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11280                                           NULL,
11281                                           "cannot allocate dest-array memory");
11282                 return NULL;
11283         }
11284         memcpy(resource, entry, sizeof(*resource));
11285         resource->idx = res_idx;
11286         resource->dev = dev;
11287         return &resource->entry;
11288 }
11289
11290 void
11291 flow_dv_dest_array_clone_free_cb(void *tool_ctx __rte_unused,
11292                                  struct mlx5_list_entry *entry)
11293 {
11294         struct mlx5_flow_dv_dest_array_resource *resource =
11295                         container_of(entry, typeof(*resource), entry);
11296         struct rte_eth_dev *dev = resource->dev;
11297         struct mlx5_priv *priv = dev->data->dev_private;
11298
11299         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11300 }
11301
11302 /**
11303  * Find existing destination array resource or create and register a new one.
11304  *
11305  * @param[in, out] dev
11306  *   Pointer to rte_eth_dev structure.
11307  * @param[in] ref
11308  *   Pointer to destination array resource reference.
11309  * @parm[in, out] dev_flow
11310  *   Pointer to the dev_flow.
11311  * @param[out] error
11312  *   pointer to error structure.
11313  *
11314  * @return
11315  *   0 on success otherwise -errno and errno is set.
11316  */
11317 static int
11318 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11319                          struct mlx5_flow_dv_dest_array_resource *ref,
11320                          struct mlx5_flow *dev_flow,
11321                          struct rte_flow_error *error)
11322 {
11323         struct mlx5_flow_dv_dest_array_resource *resource;
11324         struct mlx5_priv *priv = dev->data->dev_private;
11325         struct mlx5_list_entry *entry;
11326         struct mlx5_flow_cb_ctx ctx = {
11327                 .dev = dev,
11328                 .error = error,
11329                 .data = ref,
11330         };
11331
11332         entry = mlx5_list_register(priv->sh->dest_array_list, &ctx);
11333         if (!entry)
11334                 return -rte_errno;
11335         resource = container_of(entry, typeof(*resource), entry);
11336         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11337         dev_flow->dv.dest_array_res = resource;
11338         return 0;
11339 }
11340
11341 /**
11342  * Convert Sample action to DV specification.
11343  *
11344  * @param[in] dev
11345  *   Pointer to rte_eth_dev structure.
11346  * @param[in] action
11347  *   Pointer to sample action structure.
11348  * @param[in, out] dev_flow
11349  *   Pointer to the mlx5_flow.
11350  * @param[in] attr
11351  *   Pointer to the flow attributes.
11352  * @param[in, out] num_of_dest
11353  *   Pointer to the num of destination.
11354  * @param[in, out] sample_actions
11355  *   Pointer to sample actions list.
11356  * @param[in, out] res
11357  *   Pointer to sample resource.
11358  * @param[out] error
11359  *   Pointer to the error structure.
11360  *
11361  * @return
11362  *   0 on success, a negative errno value otherwise and rte_errno is set.
11363  */
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
				const struct rte_flow_action_sample *action,
				struct mlx5_flow *dev_flow,
				const struct rte_flow_attr *attr,
				uint32_t *num_of_dest,
				void **sample_actions,
				struct mlx5_flow_dv_sample_resource *res,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action *sub_actions;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5_flow_sub_actions_idx *sample_idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5_flow_rss_desc *rss_desc;
	uint64_t action_flags = 0;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	sample_act = &res->sample_act;
	sample_idx = &res->sample_idx;
	res->ratio = action->ratio;
	sub_actions = action->actions;
	/*
	 * Translate every sub-action of the sample list into a DR action
	 * collected in sample_actions[]. The registration helpers store
	 * their result in dev_flow, so for the resource types that share a
	 * dev_flow slot with the normal path (tag, port id, encap) the
	 * previous entry is saved before the call and restored afterwards.
	 */
	for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
		int type = sub_actions->type;
		uint32_t pre_rix = 0;
		void *pre_r;
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		{
			const struct rte_flow_action_queue *queue;
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;

			/* Single-queue fate: build an hrxq over that queue. */
			queue = sub_actions->conf;
			rss_desc->queue_num = 1;
			rss_desc->queue[0] = queue->index;
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			/* Only keep the hrxq reference on the handle when a
			 * MARK sub-action preceded this one in the list.
			 */
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_RSS:
		{
			struct mlx5_hrxq *hrxq;
			uint32_t hrxq_idx;
			const struct rte_flow_action_rss *rss;
			const uint8_t *rss_key;

			/* RSS fate: fill the workspace RSS descriptor and
			 * build an hrxq over the whole queue set.
			 */
			rss = sub_actions->conf;
			memcpy(rss_desc->queue, rss->queue,
			       rss->queue_num * sizeof(uint16_t));
			rss_desc->queue_num = rss->queue_num;
			/* NULL RSS key indicates default RSS key. */
			rss_key = !rss->key ? rss_hash_default_key : rss->key;
			memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
			/*
			 * rss->level and rss.types should be set in advance
			 * when expanding items for RSS.
			 */
			flow_dv_hashfields_set(dev_flow, rss_desc);
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
					 "cannot create fate queue");
			sample_act->dr_queue_action = hrxq->action;
			sample_idx->rix_hrxq = hrxq_idx;
			sample_actions[sample_act->actions_num++] =
						hrxq->action;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			/* Same MARK-ordering rule as the QUEUE case above. */
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action =
					MLX5_FLOW_FATE_QUEUE;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_MARK:
		{
			uint32_t tag_be = mlx5_flow_mark_set
				(((const struct rte_flow_action_mark *)
				(sub_actions->conf))->id);

			dev_flow->handle->mark = 1;
			pre_rix = dev_flow->handle->dvh.rix_tag;
			/* Save the mark resource before sample */
			pre_r = dev_flow->dv.tag_resource;
			if (flow_dv_tag_resource_register(dev, tag_be,
						  dev_flow, error))
				return -rte_errno;
			MLX5_ASSERT(dev_flow->dv.tag_resource);
			sample_act->dr_tag_action =
				dev_flow->dv.tag_resource->action;
			sample_idx->rix_tag =
				dev_flow->handle->dvh.rix_tag;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_tag_action;
			/* Recover the mark resource after sample */
			dev_flow->dv.tag_resource = pre_r;
			dev_flow->handle->dvh.rix_tag = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_COUNT:
		{
			/* The counter is created once and shared with the
			 * normal path through flow->counter.
			 */
			if (!flow->counter) {
				flow->counter =
					flow_dv_translate_create_counter(dev,
						dev_flow, sub_actions->conf,
						0);
				if (!flow->counter)
					return rte_flow_error_set
						(error, rte_errno,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"cannot create counter"
						" object.");
			}
			sample_act->dr_cnt_action =
				  (flow_dv_counter_get_by_idx(dev,
				  flow->counter, NULL))->action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_cnt_action;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
		{
			struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
			uint32_t port_id = 0;

			memset(&port_id_resource, 0, sizeof(port_id_resource));
			/* Save the port id resource before sample */
			pre_rix = dev_flow->handle->rix_port_id_action;
			pre_r = dev_flow->dv.port_id_action;
			if (flow_dv_translate_action_port_id(dev, sub_actions,
							     &port_id, error))
				return -rte_errno;
			port_id_resource.port_id = port_id;
			if (flow_dv_port_id_action_resource_register
			    (dev, &port_id_resource, dev_flow, error))
				return -rte_errno;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			sample_idx->rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_port_id_action;
			/* Recover the port id resource after sample */
			dev_flow->dv.port_id_action = pre_r;
			dev_flow->handle->rix_port_id_action = pre_rix;
			(*num_of_dest)++;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			break;
		}
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* Save the encap resource before sample */
			pre_rix = dev_flow->handle->dvh.rix_encap_decap;
			pre_r = dev_flow->dv.encap_decap;
			if (flow_dv_create_action_l2_encap(dev, sub_actions,
							   dev_flow,
							   attr->transfer,
							   error))
				return -rte_errno;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			sample_idx->rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_actions[sample_act->actions_num++] =
						sample_act->dr_encap_action;
			/* Recover the encap resource after sample */
			dev_flow->dv.encap_decap = pre_r;
			dev_flow->handle->dvh.rix_encap_decap = pre_rix;
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"Not support for sampler action");
		}
	}
	sample_act->action_flags = action_flags;
	res->ft_id = dev_flow->dv.group;
	if (attr->transfer) {
		union {
			uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
			uint64_t set_action;
		} action_ctx = { .set_action = 0 };

		/* For FDB sampling, build a modify-header SET that writes the
		 * vport metadata tag into register C0.
		 */
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
		MLX5_SET(set_action_in, action_ctx.action_in, action_type,
			 MLX5_MODIFICATION_TYPE_SET);
		MLX5_SET(set_action_in, action_ctx.action_in, field,
			 MLX5_MODI_META_REG_C_0);
		MLX5_SET(set_action_in, action_ctx.action_in, data,
			 priv->vport_meta_tag);
		res->set_action = action_ctx.set_action;
	} else if (attr->ingress) {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
	} else {
		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
	}
	return 0;
}
11593
11594 /**
11595  * Convert Sample action to DV specification.
11596  *
11597  * @param[in] dev
11598  *   Pointer to rte_eth_dev structure.
11599  * @param[in, out] dev_flow
11600  *   Pointer to the mlx5_flow.
 * @param[in] num_of_dest
 *   The number of destinations.
11603  * @param[in, out] res
11604  *   Pointer to sample resource.
11605  * @param[in, out] mdest_res
11606  *   Pointer to destination array resource.
11607  * @param[in] sample_actions
11608  *   Pointer to sample path actions list.
11609  * @param[in] action_flags
11610  *   Holds the actions detected until now.
11611  * @param[out] error
11612  *   Pointer to the error structure.
11613  *
11614  * @return
11615  *   0 on success, a negative errno value otherwise and rte_errno is set.
11616  */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
			     struct mlx5_flow *dev_flow,
			     uint32_t num_of_dest,
			     struct mlx5_flow_dv_sample_resource *res,
			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
			     void **sample_actions,
			     uint64_t action_flags,
			     struct rte_flow_error *error)
{
	/* update normal path action resource into last index of array */
	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
	struct mlx5_flow_sub_actions_list *sample_act =
					&mdest_res->sample_act[dest_index];
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;
	uint32_t normal_idx = 0;
	struct mlx5_hrxq *hrxq;
	uint32_t hrxq_idx;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (num_of_dest > 1) {
		/*
		 * Mirroring case: register a destination array holding both
		 * the sampler destination (index 0) and the normal-path
		 * destination (last index). For each normal-path resource,
		 * move its index from dev_flow->handle into mdest_res so the
		 * destination array owns the reference from now on.
		 */
		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
			/* Handle QP action for mirroring */
			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
						    rss_desc, &hrxq_idx);
			if (!hrxq)
				return rte_flow_error_set
				     (error, rte_errno,
				      RTE_FLOW_ERROR_TYPE_ACTION,
				      NULL,
				      "cannot create rx queue");
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
			sample_act->dr_queue_action = hrxq->action;
			if (action_flags & MLX5_FLOW_ACTION_MARK)
				dev_flow->handle->rix_hrxq = hrxq_idx;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_encap_decap =
				dev_flow->handle->dvh.rix_encap_decap;
			sample_act->dr_encap_action =
				dev_flow->dv.encap_decap->action;
			/* Clear the handle index: ownership moved above. */
			dev_flow->handle->dvh.rix_encap_decap = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_port_id_action =
				dev_flow->handle->rix_port_id_action;
			sample_act->dr_port_id_action =
				dev_flow->dv.port_id_action->action;
			dev_flow->handle->rix_port_id_action = 0;
		}
		if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
			normal_idx++;
			mdest_res->sample_idx[dest_index].rix_jump =
				dev_flow->handle->rix_jump;
			sample_act->dr_jump_action =
				dev_flow->dv.jump->action;
			dev_flow->handle->rix_jump = 0;
		}
		sample_act->actions_num = normal_idx;
		/* update sample action resource into first index of array */
		mdest_res->ft_type = res->ft_type;
		memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
				sizeof(struct mlx5_flow_sub_actions_idx));
		memcpy(&mdest_res->sample_act[0], &res->sample_act,
				sizeof(struct mlx5_flow_sub_actions_list));
		mdest_res->num_of_dest = num_of_dest;
		if (flow_dv_dest_array_resource_register(dev, mdest_res,
							 dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "can't create sample "
						  "action");
	} else {
		/* Single destination: register a plain sampler resource. */
		res->sub_actions = sample_actions;
		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "can't create sample action");
	}
	return 0;
}
11705
11706 /**
11707  * Remove an ASO age action from age actions list.
11708  *
11709  * @param[in] dev
11710  *   Pointer to the Ethernet device structure.
11711  * @param[in] age
11712  *   Pointer to the aso age action handler.
11713  */
11714 static void
11715 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11716                                 struct mlx5_aso_age_action *age)
11717 {
11718         struct mlx5_age_info *age_info;
11719         struct mlx5_age_param *age_param = &age->age_params;
11720         struct mlx5_priv *priv = dev->data->dev_private;
11721         uint16_t expected = AGE_CANDIDATE;
11722
11723         age_info = GET_PORT_AGE_INFO(priv);
11724         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11725                                          AGE_FREE, false, __ATOMIC_RELAXED,
11726                                          __ATOMIC_RELAXED)) {
11727                 /**
11728                  * We need the lock even it is age timeout,
11729                  * since age action may still in process.
11730                  */
11731                 rte_spinlock_lock(&age_info->aged_sl);
11732                 LIST_REMOVE(age, next);
11733                 rte_spinlock_unlock(&age_info->aged_sl);
11734                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11735         }
11736 }
11737
11738 /**
11739  * Release an ASO age action.
11740  *
11741  * @param[in] dev
11742  *   Pointer to the Ethernet device structure.
11743  * @param[in] age_idx
 *   Index of ASO age action to release.
11748  *
11749  * @return
11750  *   0 when age action was removed, otherwise the number of references.
11751  */
11752 static int
11753 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11754 {
11755         struct mlx5_priv *priv = dev->data->dev_private;
11756         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11757         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11758         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11759
11760         if (!ret) {
11761                 flow_dv_aso_age_remove_from_age(dev, age);
11762                 rte_spinlock_lock(&mng->free_sl);
11763                 LIST_INSERT_HEAD(&mng->free, age, next);
11764                 rte_spinlock_unlock(&mng->free_sl);
11765         }
11766         return ret;
11767 }
11768
11769 /**
11770  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11771  *
11772  * @param[in] dev
11773  *   Pointer to the Ethernet device structure.
11774  *
11775  * @return
11776  *   0 on success, otherwise negative errno value and rte_errno is set.
11777  */
11778 static int
11779 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11780 {
11781         struct mlx5_priv *priv = dev->data->dev_private;
11782         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11783         void *old_pools = mng->pools;
11784         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11785         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11786         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11787
11788         if (!pools) {
11789                 rte_errno = ENOMEM;
11790                 return -ENOMEM;
11791         }
11792         if (old_pools) {
11793                 memcpy(pools, old_pools,
11794                        mng->n * sizeof(struct mlx5_flow_counter_pool *));
11795                 mlx5_free(old_pools);
11796         } else {
11797                 /* First ASO flow hit allocation - starting ASO data-path. */
11798                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11799
11800                 if (ret) {
11801                         mlx5_free(pools);
11802                         return ret;
11803                 }
11804         }
11805         mng->n = resize;
11806         mng->pools = pools;
11807         return 0;
11808 }
11809
11810 /**
11811  * Create and initialize a new ASO aging pool.
11812  *
11813  * @param[in] dev
11814  *   Pointer to the Ethernet device structure.
11815  * @param[out] age_free
11816  *   Where to put the pointer of a new age action.
11817  *
11818  * @return
11819  *   The age actions pool pointer and @p age_free is set on success,
11820  *   NULL otherwise and rte_errno is set.
11821  */
static struct mlx5_aso_age_pool *
flow_dv_age_pool_create(struct rte_eth_dev *dev,
			struct mlx5_aso_age_action **age_free)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
	struct mlx5_aso_age_pool *pool = NULL;
	struct mlx5_devx_obj *obj = NULL;
	uint32_t i;

	/* One DevX flow-hit ASO object backs all actions in this pool. */
	obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
						    priv->sh->pdn);
	if (!obj) {
		rte_errno = ENODATA;
		DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
		return NULL;
	}
	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
	if (!pool) {
		claim_zero(mlx5_devx_cmd_destroy(obj));
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->flow_hit_aso_obj = obj;
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	/* resize_sl serializes pool-array growth and index assignment. */
	rte_spinlock_lock(&mng->resize_sl);
	pool->index = mng->next;
	/* Resize pools array if there is no room for the new pool in it. */
	if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
		claim_zero(mlx5_devx_cmd_destroy(obj));
		mlx5_free(pool);
		rte_spinlock_unlock(&mng->resize_sl);
		return NULL;
	}
	mng->pools[pool->index] = pool;
	mng->next++;
	rte_spinlock_unlock(&mng->resize_sl);
	/* Assign the first action in the new pool, the rest go to free list. */
	*age_free = &pool->actions[0];
	/* The free-list inserts below rely on the caller
	 * (flow_dv_aso_age_alloc) holding mng->free_sl.
	 */
	for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
		pool->actions[i].offset = i;
		LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
	}
	return pool;
}
11867
11868 /**
11869  * Allocate a ASO aging bit.
11870  *
11871  * @param[in] dev
11872  *   Pointer to the Ethernet device structure.
11873  * @param[out] error
11874  *   Pointer to the error structure.
11875  *
11876  * @return
11877  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11878  */
11879 static uint32_t
11880 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11881 {
11882         struct mlx5_priv *priv = dev->data->dev_private;
11883         const struct mlx5_aso_age_pool *pool;
11884         struct mlx5_aso_age_action *age_free = NULL;
11885         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11886
11887         MLX5_ASSERT(mng);
11888         /* Try to get the next free age action bit. */
11889         rte_spinlock_lock(&mng->free_sl);
11890         age_free = LIST_FIRST(&mng->free);
11891         if (age_free) {
11892                 LIST_REMOVE(age_free, next);
11893         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11894                 rte_spinlock_unlock(&mng->free_sl);
11895                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11896                                    NULL, "failed to create ASO age pool");
11897                 return 0; /* 0 is an error. */
11898         }
11899         rte_spinlock_unlock(&mng->free_sl);
11900         pool = container_of
11901           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11902                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11903                                                                        actions);
11904         if (!age_free->dr_action) {
11905                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11906                                                  error);
11907
11908                 if (reg_c < 0) {
11909                         rte_flow_error_set(error, rte_errno,
11910                                            RTE_FLOW_ERROR_TYPE_ACTION,
11911                                            NULL, "failed to get reg_c "
11912                                            "for ASO flow hit");
11913                         return 0; /* 0 is an error. */
11914                 }
11915 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11916                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11917                                 (priv->sh->rx_domain,
11918                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11919                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11920                                  (reg_c - REG_C_0));
11921 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11922                 if (!age_free->dr_action) {
11923                         rte_errno = errno;
11924                         rte_spinlock_lock(&mng->free_sl);
11925                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11926                         rte_spinlock_unlock(&mng->free_sl);
11927                         rte_flow_error_set(error, rte_errno,
11928                                            RTE_FLOW_ERROR_TYPE_ACTION,
11929                                            NULL, "failed to create ASO "
11930                                            "flow hit action");
11931                         return 0; /* 0 is an error. */
11932                 }
11933         }
11934         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11935         return pool->index | ((age_free->offset + 1) << 16);
11936 }
11937
11938 /**
11939  * Initialize flow ASO age parameters.
11940  *
11941  * @param[in] dev
11942  *   Pointer to rte_eth_dev structure.
11943  * @param[in] age_idx
11944  *   Index of ASO age action.
11945  * @param[in] context
11946  *   Pointer to flow counter age context.
11947  * @param[in] timeout
11948  *   Aging timeout in seconds.
11949  *
11950  */
11951 static void
11952 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11953                             uint32_t age_idx,
11954                             void *context,
11955                             uint32_t timeout)
11956 {
11957         struct mlx5_aso_age_action *aso_age;
11958
11959         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11960         MLX5_ASSERT(aso_age);
11961         aso_age->age_params.context = context;
11962         aso_age->age_params.timeout = timeout;
11963         aso_age->age_params.port_id = dev->data->port_id;
11964         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11965                          __ATOMIC_RELAXED);
11966         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11967                          __ATOMIC_RELAXED);
11968 }
11969
11970 static void
11971 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11972                                const struct rte_flow_item_integrity *value,
11973                                void *headers_m, void *headers_v)
11974 {
11975         if (mask->l4_ok) {
11976                 /* application l4_ok filter aggregates all hardware l4 filters
11977                  * therefore hw l4_checksum_ok must be implicitly added here.
11978                  */
11979                 struct rte_flow_item_integrity local_item;
11980
11981                 local_item.l4_csum_ok = 1;
11982                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11983                          local_item.l4_csum_ok);
11984                 if (value->l4_ok) {
11985                         /* application l4_ok = 1 matches sets both hw flags
11986                          * l4_ok and l4_checksum_ok flags to 1.
11987                          */
11988                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11989                                  l4_checksum_ok, local_item.l4_csum_ok);
11990                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11991                                  mask->l4_ok);
11992                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11993                                  value->l4_ok);
11994                 } else {
11995                         /* application l4_ok = 0 matches on hw flag
11996                          * l4_checksum_ok = 0 only.
11997                          */
11998                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11999                                  l4_checksum_ok, 0);
12000                 }
12001         } else if (mask->l4_csum_ok) {
12002                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
12003                          mask->l4_csum_ok);
12004                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
12005                          value->l4_csum_ok);
12006         }
12007 }
12008
12009 static void
12010 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
12011                                const struct rte_flow_item_integrity *value,
12012                                void *headers_m, void *headers_v,
12013                                bool is_ipv4)
12014 {
12015         if (mask->l3_ok) {
12016                 /* application l3_ok filter aggregates all hardware l3 filters
12017                  * therefore hw ipv4_checksum_ok must be implicitly added here.
12018                  */
12019                 struct rte_flow_item_integrity local_item;
12020
12021                 local_item.ipv4_csum_ok = !!is_ipv4;
12022                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12023                          local_item.ipv4_csum_ok);
12024                 if (value->l3_ok) {
12025                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12026                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
12027                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
12028                                  mask->l3_ok);
12029                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
12030                                  value->l3_ok);
12031                 } else {
12032                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
12033                                  ipv4_checksum_ok, 0);
12034                 }
12035         } else if (mask->ipv4_csum_ok) {
12036                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
12037                          mask->ipv4_csum_ok);
12038                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
12039                          value->ipv4_csum_ok);
12040         }
12041 }
12042
12043 static void
12044 flow_dv_translate_item_integrity(void *matcher, void *key,
12045                                  const struct rte_flow_item *head_item,
12046                                  const struct rte_flow_item *integrity_item)
12047 {
12048         const struct rte_flow_item_integrity *mask = integrity_item->mask;
12049         const struct rte_flow_item_integrity *value = integrity_item->spec;
12050         const struct rte_flow_item *tunnel_item, *end_item, *item;
12051         void *headers_m;
12052         void *headers_v;
12053         uint32_t l3_protocol;
12054
12055         if (!value)
12056                 return;
12057         if (!mask)
12058                 mask = &rte_flow_item_integrity_mask;
12059         if (value->level > 1) {
12060                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12061                                          inner_headers);
12062                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
12063         } else {
12064                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
12065                                          outer_headers);
12066                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
12067         }
12068         tunnel_item = mlx5_flow_find_tunnel_item(head_item);
12069         if (value->level > 1) {
12070                 /* tunnel item was verified during the item validation */
12071                 item = tunnel_item;
12072                 end_item = mlx5_find_end_item(tunnel_item);
12073         } else {
12074                 item = head_item;
12075                 end_item = tunnel_item ? tunnel_item :
12076                            mlx5_find_end_item(integrity_item);
12077         }
12078         l3_protocol = mask->l3_ok ?
12079                       mlx5_flow_locate_proto_l3(&item, end_item) : 0;
12080         flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
12081                                        l3_protocol == RTE_ETHER_TYPE_IPV4);
12082         flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
12083 }
12084
12085 /**
12086  * Prepares DV flow counter with aging configuration.
12087  * Gets it by index when exists, creates a new one when doesn't.
12088  *
12089  * @param[in] dev
12090  *   Pointer to rte_eth_dev structure.
12091  * @param[in] dev_flow
12092  *   Pointer to the mlx5_flow.
12093  * @param[in, out] flow
12094  *   Pointer to the sub flow.
12095  * @param[in] count
12096  *   Pointer to the counter action configuration.
12097  * @param[in] age
12098  *   Pointer to the aging action configuration.
12099  * @param[out] error
12100  *   Pointer to the error structure.
12101  *
12102  * @return
12103  *   Pointer to the counter, NULL otherwise.
12104  */
12105 static struct mlx5_flow_counter *
12106 flow_dv_prepare_counter(struct rte_eth_dev *dev,
12107                         struct mlx5_flow *dev_flow,
12108                         struct rte_flow *flow,
12109                         const struct rte_flow_action_count *count,
12110                         const struct rte_flow_action_age *age,
12111                         struct rte_flow_error *error)
12112 {
12113         if (!flow->counter) {
12114                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
12115                                                                  count, age);
12116                 if (!flow->counter) {
12117                         rte_flow_error_set(error, rte_errno,
12118                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12119                                            "cannot create counter object.");
12120                         return NULL;
12121                 }
12122         }
12123         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12124 }
12125
12126 /*
12127  * Release an ASO CT action by its own device.
12128  *
12129  * @param[in] dev
12130  *   Pointer to the Ethernet device structure.
12131  * @param[in] idx
12132  *   Index of ASO CT action to release.
12133  *
12134  * @return
12135  *   0 when CT action was removed, otherwise the number of references.
12136  */
static inline int
flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	uint32_t ret;
	struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
	enum mlx5_aso_ct_state state =
			__atomic_load_n(&ct->state, __ATOMIC_RELAXED);

	/* Cannot release when CT is in the ASO SQ. */
	if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
		return -1;
	/* Drop one reference; the last holder performs the teardown below. */
	ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
	if (!ret) {
		/* Destroy the DR actions only when they were created
		 * (they are allocated lazily on first use).
		 */
		if (ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			claim_zero(mlx5_glue->destroy_flow_action
					(ct->dr_action_orig));
#endif
			ct->dr_action_orig = NULL;
		}
		if (ct->dr_action_rply) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			claim_zero(mlx5_glue->destroy_flow_action
					(ct->dr_action_rply));
#endif
			ct->dr_action_rply = NULL;
		}
		/* Clear the state to free, no need in 1st allocation. */
		MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
		/* Return the action to the manager's free list under lock. */
		rte_spinlock_lock(&mng->ct_sl);
		LIST_INSERT_HEAD(&mng->free_cts, ct, next);
		rte_spinlock_unlock(&mng->ct_sl);
	}
	return (int)ret;
}
12174
12175 static inline int
12176 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
12177 {
12178         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12179         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12180         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12181         RTE_SET_USED(dev);
12182
12183         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12184         if (dev->data->dev_started != 1)
12185                 return -1;
12186         return flow_dv_aso_ct_dev_release(owndev, idx);
12187 }
12188
12189 /*
12190  * Resize the ASO CT pools array by 64 pools.
12191  *
12192  * @param[in] dev
12193  *   Pointer to the Ethernet device structure.
12194  *
12195  * @return
12196  *   0 on success, otherwise negative errno value and rte_errno is set.
12197  */
12198 static int
12199 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12200 {
12201         struct mlx5_priv *priv = dev->data->dev_private;
12202         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12203         void *old_pools = mng->pools;
12204         /* Magic number now, need a macro. */
12205         uint32_t resize = mng->n + 64;
12206         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12207         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12208
12209         if (!pools) {
12210                 rte_errno = ENOMEM;
12211                 return -rte_errno;
12212         }
12213         rte_rwlock_write_lock(&mng->resize_rwl);
12214         /* ASO SQ/QP was already initialized in the startup. */
12215         if (old_pools) {
12216                 /* Realloc could be an alternative choice. */
12217                 rte_memcpy(pools, old_pools,
12218                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12219                 mlx5_free(old_pools);
12220         }
12221         mng->n = resize;
12222         mng->pools = pools;
12223         rte_rwlock_write_unlock(&mng->resize_rwl);
12224         return 0;
12225 }
12226
12227 /*
12228  * Create and initialize a new ASO CT pool.
12229  *
12230  * @param[in] dev
12231  *   Pointer to the Ethernet device structure.
12232  * @param[out] ct_free
12233  *   Where to put the pointer of a new CT action.
12234  *
12235  * @return
12236  *   The CT actions pool pointer and @p ct_free is set on success,
12237  *   NULL otherwise and rte_errno is set.
12238  */
12239 static struct mlx5_aso_ct_pool *
12240 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12241                        struct mlx5_aso_ct_action **ct_free)
12242 {
12243         struct mlx5_priv *priv = dev->data->dev_private;
12244         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12245         struct mlx5_aso_ct_pool *pool = NULL;
12246         struct mlx5_devx_obj *obj = NULL;
12247         uint32_t i;
12248         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12249
12250         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
12251                                                 priv->sh->pdn, log_obj_size);
12252         if (!obj) {
12253                 rte_errno = ENODATA;
12254                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12255                 return NULL;
12256         }
12257         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12258         if (!pool) {
12259                 rte_errno = ENOMEM;
12260                 claim_zero(mlx5_devx_cmd_destroy(obj));
12261                 return NULL;
12262         }
12263         pool->devx_obj = obj;
12264         pool->index = mng->next;
12265         /* Resize pools array if there is no room for the new pool in it. */
12266         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12267                 claim_zero(mlx5_devx_cmd_destroy(obj));
12268                 mlx5_free(pool);
12269                 return NULL;
12270         }
12271         mng->pools[pool->index] = pool;
12272         mng->next++;
12273         /* Assign the first action in the new pool, the rest go to free list. */
12274         *ct_free = &pool->actions[0];
12275         /* Lock outside, the list operation is safe here. */
12276         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12277                 /* refcnt is 0 when allocating the memory. */
12278                 pool->actions[i].offset = i;
12279                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12280         }
12281         return pool;
12282 }
12283
12284 /*
12285  * Allocate a ASO CT action from free list.
12286  *
12287  * @param[in] dev
12288  *   Pointer to the Ethernet device structure.
12289  * @param[out] error
12290  *   Pointer to the error structure.
12291  *
12292  * @return
12293  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12294  */
static uint32_t
flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
	struct mlx5_aso_ct_action *ct = NULL;
	struct mlx5_aso_ct_pool *pool;
	uint8_t reg_c;
	uint32_t ct_idx;

	MLX5_ASSERT(mng);
	/* CT objects are backed by DevX; nothing to do without it. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get a free CT action, if no, a new pool will be created. */
	rte_spinlock_lock(&mng->ct_sl);
	ct = LIST_FIRST(&mng->free_cts);
	if (ct) {
		LIST_REMOVE(ct, next);
	} else if (!flow_dv_ct_pool_create(dev, &ct)) {
		rte_spinlock_unlock(&mng->ct_sl);
		rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "failed to create ASO CT pool");
		return 0;
	}
	rte_spinlock_unlock(&mng->ct_sl);
	/* Recover the owning pool from the action's offset within it. */
	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
	ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
	/* 0: inactive, 1: created, 2+: used by flows. */
	__atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
	reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
	/* The DR actions are created lazily, once per CT object lifetime. */
	if (!ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
		ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
			(priv->sh->rx_domain, pool->devx_obj->obj,
			 ct->offset,
			 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
			 reg_c - REG_C_0);
#else
		RTE_SET_USED(reg_c);
#endif
		/* On failure, put the object back to the free list. */
		if (!ct->dr_action_orig) {
			flow_dv_aso_ct_dev_release(dev, ct_idx);
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "failed to create ASO CT action");
			return 0;
		}
	}
	if (!ct->dr_action_rply) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
		ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
			(priv->sh->rx_domain, pool->devx_obj->obj,
			 ct->offset,
			 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
			 reg_c - REG_C_0);
#endif
		if (!ct->dr_action_rply) {
			flow_dv_aso_ct_dev_release(dev, ct_idx);
			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "failed to create ASO CT action");
			return 0;
		}
	}
	return ct_idx;
}
12363
12364 /*
12365  * Create a conntrack object with context and actions by using ASO mechanism.
12366  *
12367  * @param[in] dev
12368  *   Pointer to rte_eth_dev structure.
12369  * @param[in] pro
12370  *   Pointer to conntrack information profile.
12371  * @param[out] error
12372  *   Pointer to the error structure.
12373  *
12374  * @return
12375  *   Index to conntrack object on success, 0 otherwise.
12376  */
12377 static uint32_t
12378 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12379                                    const struct rte_flow_action_conntrack *pro,
12380                                    struct rte_flow_error *error)
12381 {
12382         struct mlx5_priv *priv = dev->data->dev_private;
12383         struct mlx5_dev_ctx_shared *sh = priv->sh;
12384         struct mlx5_aso_ct_action *ct;
12385         uint32_t idx;
12386
12387         if (!sh->ct_aso_en)
12388                 return rte_flow_error_set(error, ENOTSUP,
12389                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12390                                           "Connection is not supported");
12391         idx = flow_dv_aso_ct_alloc(dev, error);
12392         if (!idx)
12393                 return rte_flow_error_set(error, rte_errno,
12394                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12395                                           "Failed to allocate CT object");
12396         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12397         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12398                 return rte_flow_error_set(error, EBUSY,
12399                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12400                                           "Failed to update CT");
12401         ct->is_original = !!pro->is_original_dir;
12402         ct->peer = pro->peer_port;
12403         return idx;
12404 }
12405
12406 /**
12407  * Fill the flow with DV spec, lock free
12408  * (mutex should be acquired by caller).
12409  *
12410  * @param[in] dev
12411  *   Pointer to rte_eth_dev structure.
12412  * @param[in, out] dev_flow
12413  *   Pointer to the sub flow.
12414  * @param[in] attr
12415  *   Pointer to the flow attributes.
12416  * @param[in] items
12417  *   Pointer to the list of items.
12418  * @param[in] actions
12419  *   Pointer to the list of actions.
12420  * @param[out] error
12421  *   Pointer to the error structure.
12422  *
12423  * @return
12424  *   0 on success, a negative errno value otherwise and rte_errno is set.
12425  */
12426 static int
12427 flow_dv_translate(struct rte_eth_dev *dev,
12428                   struct mlx5_flow *dev_flow,
12429                   const struct rte_flow_attr *attr,
12430                   const struct rte_flow_item items[],
12431                   const struct rte_flow_action actions[],
12432                   struct rte_flow_error *error)
12433 {
12434         struct mlx5_priv *priv = dev->data->dev_private;
12435         struct mlx5_dev_config *dev_conf = &priv->config;
12436         struct rte_flow *flow = dev_flow->flow;
12437         struct mlx5_flow_handle *handle = dev_flow->handle;
12438         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12439         struct mlx5_flow_rss_desc *rss_desc;
12440         uint64_t item_flags = 0;
12441         uint64_t last_item = 0;
12442         uint64_t action_flags = 0;
12443         struct mlx5_flow_dv_matcher matcher = {
12444                 .mask = {
12445                         .size = sizeof(matcher.mask.buf),
12446                 },
12447         };
12448         int actions_n = 0;
12449         bool actions_end = false;
12450         union {
12451                 struct mlx5_flow_dv_modify_hdr_resource res;
12452                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12453                             sizeof(struct mlx5_modification_cmd) *
12454                             (MLX5_MAX_MODIFY_NUM + 1)];
12455         } mhdr_dummy;
12456         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12457         const struct rte_flow_action_count *count = NULL;
12458         const struct rte_flow_action_age *non_shared_age = NULL;
12459         union flow_dv_attr flow_attr = { .attr = 0 };
12460         uint32_t tag_be;
12461         union mlx5_flow_tbl_key tbl_key;
12462         uint32_t modify_action_position = UINT32_MAX;
12463         void *match_mask = matcher.mask.buf;
12464         void *match_value = dev_flow->dv.value.buf;
12465         uint8_t next_protocol = 0xff;
12466         struct rte_vlan_hdr vlan = { 0 };
12467         struct mlx5_flow_dv_dest_array_resource mdest_res;
12468         struct mlx5_flow_dv_sample_resource sample_res;
12469         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12470         const struct rte_flow_action_sample *sample = NULL;
12471         struct mlx5_flow_sub_actions_list *sample_act;
12472         uint32_t sample_act_pos = UINT32_MAX;
12473         uint32_t age_act_pos = UINT32_MAX;
12474         uint32_t num_of_dest = 0;
12475         int tmp_actions_n = 0;
12476         uint32_t table;
12477         int ret = 0;
12478         const struct mlx5_flow_tunnel *tunnel = NULL;
12479         struct flow_grp_info grp_info = {
12480                 .external = !!dev_flow->external,
12481                 .transfer = !!attr->transfer,
12482                 .fdb_def_rule = !!priv->fdb_def_rule,
12483                 .skip_scale = dev_flow->skip_scale &
12484                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12485                 .std_tbl_fix = true,
12486         };
12487         const struct rte_flow_item *head_item = items;
12488
12489         if (!wks)
12490                 return rte_flow_error_set(error, ENOMEM,
12491                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12492                                           NULL,
12493                                           "failed to push flow workspace");
12494         rss_desc = &wks->rss_desc;
12495         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12496         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12497         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12498                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12499         /* update normal path action resource into last index of array */
12500         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12501         if (is_tunnel_offload_active(dev)) {
12502                 if (dev_flow->tunnel) {
12503                         RTE_VERIFY(dev_flow->tof_type ==
12504                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12505                         tunnel = dev_flow->tunnel;
12506                 } else {
12507                         tunnel = mlx5_get_tof(items, actions,
12508                                               &dev_flow->tof_type);
12509                         dev_flow->tunnel = tunnel;
12510                 }
12511                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12512                                         (dev, attr, tunnel, dev_flow->tof_type);
12513         }
12514         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12515                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12516         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12517                                        &grp_info, error);
12518         if (ret)
12519                 return ret;
12520         dev_flow->dv.group = table;
12521         if (attr->transfer)
12522                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12523         /* number of actions must be set to 0 in case of dirty stack. */
12524         mhdr_res->actions_num = 0;
12525         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12526                 /*
12527                  * do not add decap action if match rule drops packet
12528                  * HW rejects rules with decap & drop
12529                  *
12530                  * if tunnel match rule was inserted before matching tunnel set
12531                  * rule flow table used in the match rule must be registered.
12532                  * current implementation handles that in the
12533                  * flow_dv_match_register() at the function end.
12534                  */
12535                 bool add_decap = true;
12536                 const struct rte_flow_action *ptr = actions;
12537
12538                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12539                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12540                                 add_decap = false;
12541                                 break;
12542                         }
12543                 }
12544                 if (add_decap) {
12545                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12546                                                            attr->transfer,
12547                                                            error))
12548                                 return -rte_errno;
12549                         dev_flow->dv.actions[actions_n++] =
12550                                         dev_flow->dv.encap_decap->action;
12551                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12552                 }
12553         }
12554         for (; !actions_end ; actions++) {
12555                 const struct rte_flow_action_queue *queue;
12556                 const struct rte_flow_action_rss *rss;
12557                 const struct rte_flow_action *action = actions;
12558                 const uint8_t *rss_key;
12559                 struct mlx5_flow_tbl_resource *tbl;
12560                 struct mlx5_aso_age_action *age_act;
12561                 struct mlx5_flow_counter *cnt_act;
12562                 uint32_t port_id = 0;
12563                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12564                 int action_type = actions->type;
12565                 const struct rte_flow_action *found_action = NULL;
12566                 uint32_t jump_group = 0;
12567                 uint32_t owner_idx;
12568                 struct mlx5_aso_ct_action *ct;
12569
12570                 if (!mlx5_flow_os_action_supported(action_type))
12571                         return rte_flow_error_set(error, ENOTSUP,
12572                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12573                                                   actions,
12574                                                   "action not supported");
12575                 switch (action_type) {
12576                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12577                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12578                         break;
12579                 case RTE_FLOW_ACTION_TYPE_VOID:
12580                         break;
12581                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12582                         if (flow_dv_translate_action_port_id(dev, action,
12583                                                              &port_id, error))
12584                                 return -rte_errno;
12585                         port_id_resource.port_id = port_id;
12586                         MLX5_ASSERT(!handle->rix_port_id_action);
12587                         if (flow_dv_port_id_action_resource_register
12588                             (dev, &port_id_resource, dev_flow, error))
12589                                 return -rte_errno;
12590                         dev_flow->dv.actions[actions_n++] =
12591                                         dev_flow->dv.port_id_action->action;
12592                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12593                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12594                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12595                         num_of_dest++;
12596                         break;
12597                 case RTE_FLOW_ACTION_TYPE_FLAG:
12598                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12599                         dev_flow->handle->mark = 1;
12600                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12601                                 struct rte_flow_action_mark mark = {
12602                                         .id = MLX5_FLOW_MARK_DEFAULT,
12603                                 };
12604
12605                                 if (flow_dv_convert_action_mark(dev, &mark,
12606                                                                 mhdr_res,
12607                                                                 error))
12608                                         return -rte_errno;
12609                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12610                                 break;
12611                         }
12612                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12613                         /*
12614                          * Only one FLAG or MARK is supported per device flow
12615                          * right now. So the pointer to the tag resource must be
12616                          * zero before the register process.
12617                          */
12618                         MLX5_ASSERT(!handle->dvh.rix_tag);
12619                         if (flow_dv_tag_resource_register(dev, tag_be,
12620                                                           dev_flow, error))
12621                                 return -rte_errno;
12622                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12623                         dev_flow->dv.actions[actions_n++] =
12624                                         dev_flow->dv.tag_resource->action;
12625                         break;
12626                 case RTE_FLOW_ACTION_TYPE_MARK:
12627                         action_flags |= MLX5_FLOW_ACTION_MARK;
12628                         dev_flow->handle->mark = 1;
12629                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12630                                 const struct rte_flow_action_mark *mark =
12631                                         (const struct rte_flow_action_mark *)
12632                                                 actions->conf;
12633
12634                                 if (flow_dv_convert_action_mark(dev, mark,
12635                                                                 mhdr_res,
12636                                                                 error))
12637                                         return -rte_errno;
12638                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12639                                 break;
12640                         }
12641                         /* Fall-through */
12642                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12643                         /* Legacy (non-extensive) MARK action. */
12644                         tag_be = mlx5_flow_mark_set
12645                               (((const struct rte_flow_action_mark *)
12646                                (actions->conf))->id);
12647                         MLX5_ASSERT(!handle->dvh.rix_tag);
12648                         if (flow_dv_tag_resource_register(dev, tag_be,
12649                                                           dev_flow, error))
12650                                 return -rte_errno;
12651                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12652                         dev_flow->dv.actions[actions_n++] =
12653                                         dev_flow->dv.tag_resource->action;
12654                         break;
12655                 case RTE_FLOW_ACTION_TYPE_SET_META:
12656                         if (flow_dv_convert_action_set_meta
12657                                 (dev, mhdr_res, attr,
12658                                  (const struct rte_flow_action_set_meta *)
12659                                   actions->conf, error))
12660                                 return -rte_errno;
12661                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12662                         break;
12663                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12664                         if (flow_dv_convert_action_set_tag
12665                                 (dev, mhdr_res,
12666                                  (const struct rte_flow_action_set_tag *)
12667                                   actions->conf, error))
12668                                 return -rte_errno;
12669                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12670                         break;
12671                 case RTE_FLOW_ACTION_TYPE_DROP:
12672                         action_flags |= MLX5_FLOW_ACTION_DROP;
12673                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12674                         break;
12675                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12676                         queue = actions->conf;
12677                         rss_desc->queue_num = 1;
12678                         rss_desc->queue[0] = queue->index;
12679                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12680                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12681                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12682                         num_of_dest++;
12683                         break;
12684                 case RTE_FLOW_ACTION_TYPE_RSS:
12685                         rss = actions->conf;
12686                         memcpy(rss_desc->queue, rss->queue,
12687                                rss->queue_num * sizeof(uint16_t));
12688                         rss_desc->queue_num = rss->queue_num;
12689                         /* NULL RSS key indicates default RSS key. */
12690                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12691                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12692                         /*
12693                          * rss->level and rss.types should be set in advance
12694                          * when expanding items for RSS.
12695                          */
12696                         action_flags |= MLX5_FLOW_ACTION_RSS;
12697                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12698                                 MLX5_FLOW_FATE_SHARED_RSS :
12699                                 MLX5_FLOW_FATE_QUEUE;
12700                         break;
12701                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
12702                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12703                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12704                         __atomic_fetch_add(&age_act->refcnt, 1,
12705                                            __ATOMIC_RELAXED);
12706                         age_act_pos = actions_n++;
12707                         action_flags |= MLX5_FLOW_ACTION_AGE;
12708                         break;
12709                 case RTE_FLOW_ACTION_TYPE_AGE:
12710                         non_shared_age = action->conf;
12711                         age_act_pos = actions_n++;
12712                         action_flags |= MLX5_FLOW_ACTION_AGE;
12713                         break;
12714                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
12715                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12716                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12717                                                              NULL);
12718                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12719                                            __ATOMIC_RELAXED);
12720                         /* Save information first, will apply later. */
12721                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12722                         break;
12723                 case RTE_FLOW_ACTION_TYPE_COUNT:
12724                         if (!dev_conf->devx) {
12725                                 return rte_flow_error_set
12726                                               (error, ENOTSUP,
12727                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12728                                                NULL,
12729                                                "count action not supported");
12730                         }
12731                         /* Save information first, will apply later. */
12732                         count = action->conf;
12733                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12734                         break;
12735                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12736                         dev_flow->dv.actions[actions_n++] =
12737                                                 priv->sh->pop_vlan_action;
12738                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12739                         break;
12740                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12741                         if (!(action_flags &
12742                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12743                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12744                         vlan.eth_proto = rte_be_to_cpu_16
12745                              ((((const struct rte_flow_action_of_push_vlan *)
12746                                                    actions->conf)->ethertype));
12747                         found_action = mlx5_flow_find_action
12748                                         (actions + 1,
12749                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12750                         if (found_action)
12751                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12752                         found_action = mlx5_flow_find_action
12753                                         (actions + 1,
12754                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12755                         if (found_action)
12756                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12757                         if (flow_dv_create_action_push_vlan
12758                                             (dev, attr, &vlan, dev_flow, error))
12759                                 return -rte_errno;
12760                         dev_flow->dv.actions[actions_n++] =
12761                                         dev_flow->dv.push_vlan_res->action;
12762                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12763                         break;
12764                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12765                         /* of_vlan_push action handled this action */
12766                         MLX5_ASSERT(action_flags &
12767                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12768                         break;
12769                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12770                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12771                                 break;
12772                         flow_dev_get_vlan_info_from_items(items, &vlan);
12773                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12774                         /* If no VLAN push - this is a modify header action */
12775                         if (flow_dv_convert_action_modify_vlan_vid
12776                                                 (mhdr_res, actions, error))
12777                                 return -rte_errno;
12778                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12779                         break;
12780                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12781                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12782                         if (flow_dv_create_action_l2_encap(dev, actions,
12783                                                            dev_flow,
12784                                                            attr->transfer,
12785                                                            error))
12786                                 return -rte_errno;
12787                         dev_flow->dv.actions[actions_n++] =
12788                                         dev_flow->dv.encap_decap->action;
12789                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12790                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12791                                 sample_act->action_flags |=
12792                                                         MLX5_FLOW_ACTION_ENCAP;
12793                         break;
12794                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12795                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12796                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12797                                                            attr->transfer,
12798                                                            error))
12799                                 return -rte_errno;
12800                         dev_flow->dv.actions[actions_n++] =
12801                                         dev_flow->dv.encap_decap->action;
12802                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12803                         break;
12804                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12805                         /* Handle encap with preceding decap. */
12806                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12807                                 if (flow_dv_create_action_raw_encap
12808                                         (dev, actions, dev_flow, attr, error))
12809                                         return -rte_errno;
12810                                 dev_flow->dv.actions[actions_n++] =
12811                                         dev_flow->dv.encap_decap->action;
12812                         } else {
12813                                 /* Handle encap without preceding decap. */
12814                                 if (flow_dv_create_action_l2_encap
12815                                     (dev, actions, dev_flow, attr->transfer,
12816                                      error))
12817                                         return -rte_errno;
12818                                 dev_flow->dv.actions[actions_n++] =
12819                                         dev_flow->dv.encap_decap->action;
12820                         }
12821                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12822                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12823                                 sample_act->action_flags |=
12824                                                         MLX5_FLOW_ACTION_ENCAP;
12825                         break;
12826                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12827                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12828                                 ;
12829                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12830                                 if (flow_dv_create_action_l2_decap
12831                                     (dev, dev_flow, attr->transfer, error))
12832                                         return -rte_errno;
12833                                 dev_flow->dv.actions[actions_n++] =
12834                                         dev_flow->dv.encap_decap->action;
12835                         }
12836                         /* If decap is followed by encap, handle it at encap. */
12837                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12838                         break;
12839                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
12840                         dev_flow->dv.actions[actions_n++] =
12841                                 (void *)(uintptr_t)action->conf;
12842                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12843                         break;
12844                 case RTE_FLOW_ACTION_TYPE_JUMP:
12845                         jump_group = ((const struct rte_flow_action_jump *)
12846                                                         action->conf)->group;
12847                         grp_info.std_tbl_fix = 0;
12848                         if (dev_flow->skip_scale &
12849                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12850                                 grp_info.skip_scale = 1;
12851                         else
12852                                 grp_info.skip_scale = 0;
12853                         ret = mlx5_flow_group_to_table(dev, tunnel,
12854                                                        jump_group,
12855                                                        &table,
12856                                                        &grp_info, error);
12857                         if (ret)
12858                                 return ret;
12859                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12860                                                        attr->transfer,
12861                                                        !!dev_flow->external,
12862                                                        tunnel, jump_group, 0,
12863                                                        0, error);
12864                         if (!tbl)
12865                                 return rte_flow_error_set
12866                                                 (error, errno,
12867                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12868                                                  NULL,
12869                                                  "cannot create jump action.");
12870                         if (flow_dv_jump_tbl_resource_register
12871                             (dev, tbl, dev_flow, error)) {
12872                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12873                                 return rte_flow_error_set
12874                                                 (error, errno,
12875                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12876                                                  NULL,
12877                                                  "cannot create jump action.");
12878                         }
12879                         dev_flow->dv.actions[actions_n++] =
12880                                         dev_flow->dv.jump->action;
12881                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12882                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12883                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12884                         num_of_dest++;
12885                         break;
12886                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12887                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12888                         if (flow_dv_convert_action_modify_mac
12889                                         (mhdr_res, actions, error))
12890                                 return -rte_errno;
12891                         action_flags |= actions->type ==
12892                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12893                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12894                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12895                         break;
12896                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12897                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12898                         if (flow_dv_convert_action_modify_ipv4
12899                                         (mhdr_res, actions, error))
12900                                 return -rte_errno;
12901                         action_flags |= actions->type ==
12902                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12903                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12904                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12905                         break;
12906                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12907                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12908                         if (flow_dv_convert_action_modify_ipv6
12909                                         (mhdr_res, actions, error))
12910                                 return -rte_errno;
12911                         action_flags |= actions->type ==
12912                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12913                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12914                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12915                         break;
12916                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12917                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12918                         if (flow_dv_convert_action_modify_tp
12919                                         (mhdr_res, actions, items,
12920                                          &flow_attr, dev_flow, !!(action_flags &
12921                                          MLX5_FLOW_ACTION_DECAP), error))
12922                                 return -rte_errno;
12923                         action_flags |= actions->type ==
12924                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12925                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12926                                         MLX5_FLOW_ACTION_SET_TP_DST;
12927                         break;
12928                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12929                         if (flow_dv_convert_action_modify_dec_ttl
12930                                         (mhdr_res, items, &flow_attr, dev_flow,
12931                                          !!(action_flags &
12932                                          MLX5_FLOW_ACTION_DECAP), error))
12933                                 return -rte_errno;
12934                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12935                         break;
12936                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12937                         if (flow_dv_convert_action_modify_ttl
12938                                         (mhdr_res, actions, items, &flow_attr,
12939                                          dev_flow, !!(action_flags &
12940                                          MLX5_FLOW_ACTION_DECAP), error))
12941                                 return -rte_errno;
12942                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12943                         break;
12944                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12945                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12946                         if (flow_dv_convert_action_modify_tcp_seq
12947                                         (mhdr_res, actions, error))
12948                                 return -rte_errno;
12949                         action_flags |= actions->type ==
12950                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12951                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12952                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12953                         break;
12954
12955                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12956                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12957                         if (flow_dv_convert_action_modify_tcp_ack
12958                                         (mhdr_res, actions, error))
12959                                 return -rte_errno;
12960                         action_flags |= actions->type ==
12961                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12962                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12963                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12964                         break;
12965                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12966                         if (flow_dv_convert_action_set_reg
12967                                         (mhdr_res, actions, error))
12968                                 return -rte_errno;
12969                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12970                         break;
12971                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12972                         if (flow_dv_convert_action_copy_mreg
12973                                         (dev, mhdr_res, actions, error))
12974                                 return -rte_errno;
12975                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12976                         break;
12977                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12978                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12979                         dev_flow->handle->fate_action =
12980                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12981                         break;
12982                 case RTE_FLOW_ACTION_TYPE_METER:
12983                         if (!wks->fm)
12984                                 return rte_flow_error_set(error, rte_errno,
12985                                         RTE_FLOW_ERROR_TYPE_ACTION,
12986                                         NULL, "Failed to get meter in flow.");
12987                         /* Set the meter action. */
12988                         dev_flow->dv.actions[actions_n++] =
12989                                 wks->fm->meter_action;
12990                         action_flags |= MLX5_FLOW_ACTION_METER;
12991                         break;
12992                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12993                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12994                                                               actions, error))
12995                                 return -rte_errno;
12996                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12997                         break;
12998                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12999                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
13000                                                               actions, error))
13001                                 return -rte_errno;
13002                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
13003                         break;
13004                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
13005                         sample_act_pos = actions_n;
13006                         sample = (const struct rte_flow_action_sample *)
13007                                  action->conf;
13008                         actions_n++;
13009                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
13010                         /* put encap action into group if work with port id */
13011                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
13012                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
13013                                 sample_act->action_flags |=
13014                                                         MLX5_FLOW_ACTION_ENCAP;
13015                         break;
13016                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13017                         if (flow_dv_convert_action_modify_field
13018                                         (dev, mhdr_res, actions, attr, error))
13019                                 return -rte_errno;
13020                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
13021                         break;
13022                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
13023                         owner_idx = (uint32_t)(uintptr_t)action->conf;
13024                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
13025                         if (!ct)
13026                                 return rte_flow_error_set(error, EINVAL,
13027                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13028                                                 NULL,
13029                                                 "Failed to get CT object.");
13030                         if (mlx5_aso_ct_available(priv->sh, ct))
13031                                 return rte_flow_error_set(error, rte_errno,
13032                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13033                                                 NULL,
13034                                                 "CT is unavailable.");
13035                         if (ct->is_original)
13036                                 dev_flow->dv.actions[actions_n] =
13037                                                         ct->dr_action_orig;
13038                         else
13039                                 dev_flow->dv.actions[actions_n] =
13040                                                         ct->dr_action_rply;
13041                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
13042                         flow->ct = owner_idx;
13043                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
13044                         actions_n++;
13045                         action_flags |= MLX5_FLOW_ACTION_CT;
13046                         break;
13047                 case RTE_FLOW_ACTION_TYPE_END:
13048                         actions_end = true;
13049                         if (mhdr_res->actions_num) {
13050                                 /* create modify action if needed. */
13051                                 if (flow_dv_modify_hdr_resource_register
13052                                         (dev, mhdr_res, dev_flow, error))
13053                                         return -rte_errno;
13054                                 dev_flow->dv.actions[modify_action_position] =
13055                                         handle->dvh.modify_hdr->action;
13056                         }
13057                         /*
13058                          * Handle AGE and COUNT action by single HW counter
13059                          * when they are not shared.
13060                          */
13061                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
13062                                 if ((non_shared_age &&
13063                                      count && !count->shared) ||
13064                                     !(priv->sh->flow_hit_aso_en &&
13065                                       (attr->group || attr->transfer))) {
13066                                         /* Creates age by counters. */
13067                                         cnt_act = flow_dv_prepare_counter
13068                                                                 (dev, dev_flow,
13069                                                                  flow, count,
13070                                                                  non_shared_age,
13071                                                                  error);
13072                                         if (!cnt_act)
13073                                                 return -rte_errno;
13074                                         dev_flow->dv.actions[age_act_pos] =
13075                                                                 cnt_act->action;
13076                                         break;
13077                                 }
13078                                 if (!flow->age && non_shared_age) {
13079                                         flow->age = flow_dv_aso_age_alloc
13080                                                                 (dev, error);
13081                                         if (!flow->age)
13082                                                 return -rte_errno;
13083                                         flow_dv_aso_age_params_init
13084                                                     (dev, flow->age,
13085                                                      non_shared_age->context ?
13086                                                      non_shared_age->context :
13087                                                      (void *)(uintptr_t)
13088                                                      (dev_flow->flow_idx),
13089                                                      non_shared_age->timeout);
13090                                 }
13091                                 age_act = flow_aso_age_get_by_idx(dev,
13092                                                                   flow->age);
13093                                 dev_flow->dv.actions[age_act_pos] =
13094                                                              age_act->dr_action;
13095                         }
13096                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13097                                 /*
13098                                  * Create one count action, to be used
13099                                  * by all sub-flows.
13100                                  */
13101                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
13102                                                                   flow, count,
13103                                                                   NULL, error);
13104                                 if (!cnt_act)
13105                                         return -rte_errno;
13106                                 dev_flow->dv.actions[actions_n++] =
13107                                                                 cnt_act->action;
13108                         }
13109                 default:
13110                         break;
13111                 }
13112                 if (mhdr_res->actions_num &&
13113                     modify_action_position == UINT32_MAX)
13114                         modify_action_position = actions_n++;
13115         }
13116         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
13117                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
13118                 int item_type = items->type;
13119
13120                 if (!mlx5_flow_os_item_supported(item_type))
13121                         return rte_flow_error_set(error, ENOTSUP,
13122                                                   RTE_FLOW_ERROR_TYPE_ITEM,
13123                                                   NULL, "item not supported");
13124                 switch (item_type) {
13125                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
13126                         flow_dv_translate_item_port_id
13127                                 (dev, match_mask, match_value, items, attr);
13128                         last_item = MLX5_FLOW_ITEM_PORT_ID;
13129                         break;
13130                 case RTE_FLOW_ITEM_TYPE_ETH:
13131                         flow_dv_translate_item_eth(match_mask, match_value,
13132                                                    items, tunnel,
13133                                                    dev_flow->dv.group);
13134                         matcher.priority = action_flags &
13135                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13136                                         !dev_flow->external ?
13137                                         MLX5_PRIORITY_MAP_L3 :
13138                                         MLX5_PRIORITY_MAP_L2;
13139                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13140                                              MLX5_FLOW_LAYER_OUTER_L2;
13141                         break;
13142                 case RTE_FLOW_ITEM_TYPE_VLAN:
13143                         flow_dv_translate_item_vlan(dev_flow,
13144                                                     match_mask, match_value,
13145                                                     items, tunnel,
13146                                                     dev_flow->dv.group);
13147                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13148                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13149                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13150                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13151                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13152                         break;
13153                 case RTE_FLOW_ITEM_TYPE_IPV4:
13154                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13155                                                   &item_flags, &tunnel);
13156                         flow_dv_translate_item_ipv4(match_mask, match_value,
13157                                                     items, tunnel,
13158                                                     dev_flow->dv.group);
13159                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13160                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13161                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13162                         if (items->mask != NULL &&
13163                             ((const struct rte_flow_item_ipv4 *)
13164                              items->mask)->hdr.next_proto_id) {
13165                                 next_protocol =
13166                                         ((const struct rte_flow_item_ipv4 *)
13167                                          (items->spec))->hdr.next_proto_id;
13168                                 next_protocol &=
13169                                         ((const struct rte_flow_item_ipv4 *)
13170                                          (items->mask))->hdr.next_proto_id;
13171                         } else {
13172                                 /* Reset for inner layer. */
13173                                 next_protocol = 0xff;
13174                         }
13175                         break;
13176                 case RTE_FLOW_ITEM_TYPE_IPV6:
13177                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13178                                                   &item_flags, &tunnel);
13179                         flow_dv_translate_item_ipv6(match_mask, match_value,
13180                                                     items, tunnel,
13181                                                     dev_flow->dv.group);
13182                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13183                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13184                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13185                         if (items->mask != NULL &&
13186                             ((const struct rte_flow_item_ipv6 *)
13187                              items->mask)->hdr.proto) {
13188                                 next_protocol =
13189                                         ((const struct rte_flow_item_ipv6 *)
13190                                          items->spec)->hdr.proto;
13191                                 next_protocol &=
13192                                         ((const struct rte_flow_item_ipv6 *)
13193                                          items->mask)->hdr.proto;
13194                         } else {
13195                                 /* Reset for inner layer. */
13196                                 next_protocol = 0xff;
13197                         }
13198                         break;
13199                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13200                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13201                                                              match_value,
13202                                                              items, tunnel);
13203                         last_item = tunnel ?
13204                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13205                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13206                         if (items->mask != NULL &&
13207                             ((const struct rte_flow_item_ipv6_frag_ext *)
13208                              items->mask)->hdr.next_header) {
13209                                 next_protocol =
13210                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13211                                  items->spec)->hdr.next_header;
13212                                 next_protocol &=
13213                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13214                                  items->mask)->hdr.next_header;
13215                         } else {
13216                                 /* Reset for inner layer. */
13217                                 next_protocol = 0xff;
13218                         }
13219                         break;
13220                 case RTE_FLOW_ITEM_TYPE_TCP:
13221                         flow_dv_translate_item_tcp(match_mask, match_value,
13222                                                    items, tunnel);
13223                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13224                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13225                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13226                         break;
13227                 case RTE_FLOW_ITEM_TYPE_UDP:
13228                         flow_dv_translate_item_udp(match_mask, match_value,
13229                                                    items, tunnel);
13230                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13231                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13232                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13233                         break;
13234                 case RTE_FLOW_ITEM_TYPE_GRE:
13235                         flow_dv_translate_item_gre(match_mask, match_value,
13236                                                    items, tunnel);
13237                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13238                         last_item = MLX5_FLOW_LAYER_GRE;
13239                         break;
13240                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13241                         flow_dv_translate_item_gre_key(match_mask,
13242                                                        match_value, items);
13243                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13244                         break;
13245                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13246                         flow_dv_translate_item_nvgre(match_mask, match_value,
13247                                                      items, tunnel);
13248                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13249                         last_item = MLX5_FLOW_LAYER_GRE;
13250                         break;
13251                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13252                         flow_dv_translate_item_vxlan(dev, attr,
13253                                                      match_mask, match_value,
13254                                                      items, tunnel);
13255                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13256                         last_item = MLX5_FLOW_LAYER_VXLAN;
13257                         break;
13258                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13259                         flow_dv_translate_item_vxlan_gpe(match_mask,
13260                                                          match_value, items,
13261                                                          tunnel);
13262                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13263                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13264                         break;
13265                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13266                         flow_dv_translate_item_geneve(match_mask, match_value,
13267                                                       items, tunnel);
13268                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13269                         last_item = MLX5_FLOW_LAYER_GENEVE;
13270                         break;
13271                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13272                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13273                                                           match_value,
13274                                                           items, error);
13275                         if (ret)
13276                                 return rte_flow_error_set(error, -ret,
13277                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13278                                         "cannot create GENEVE TLV option");
13279                         flow->geneve_tlv_option = 1;
13280                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13281                         break;
13282                 case RTE_FLOW_ITEM_TYPE_MPLS:
13283                         flow_dv_translate_item_mpls(match_mask, match_value,
13284                                                     items, last_item, tunnel);
13285                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13286                         last_item = MLX5_FLOW_LAYER_MPLS;
13287                         break;
13288                 case RTE_FLOW_ITEM_TYPE_MARK:
13289                         flow_dv_translate_item_mark(dev, match_mask,
13290                                                     match_value, items);
13291                         last_item = MLX5_FLOW_ITEM_MARK;
13292                         break;
13293                 case RTE_FLOW_ITEM_TYPE_META:
13294                         flow_dv_translate_item_meta(dev, match_mask,
13295                                                     match_value, attr, items);
13296                         last_item = MLX5_FLOW_ITEM_METADATA;
13297                         break;
13298                 case RTE_FLOW_ITEM_TYPE_ICMP:
13299                         flow_dv_translate_item_icmp(match_mask, match_value,
13300                                                     items, tunnel);
13301                         last_item = MLX5_FLOW_LAYER_ICMP;
13302                         break;
13303                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13304                         flow_dv_translate_item_icmp6(match_mask, match_value,
13305                                                       items, tunnel);
13306                         last_item = MLX5_FLOW_LAYER_ICMP6;
13307                         break;
13308                 case RTE_FLOW_ITEM_TYPE_TAG:
13309                         flow_dv_translate_item_tag(dev, match_mask,
13310                                                    match_value, items);
13311                         last_item = MLX5_FLOW_ITEM_TAG;
13312                         break;
13313                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13314                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13315                                                         match_value, items);
13316                         last_item = MLX5_FLOW_ITEM_TAG;
13317                         break;
13318                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13319                         flow_dv_translate_item_tx_queue(dev, match_mask,
13320                                                         match_value,
13321                                                         items);
13322                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13323                         break;
13324                 case RTE_FLOW_ITEM_TYPE_GTP:
13325                         flow_dv_translate_item_gtp(match_mask, match_value,
13326                                                    items, tunnel);
13327                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13328                         last_item = MLX5_FLOW_LAYER_GTP;
13329                         break;
13330                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13331                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13332                                                           match_value,
13333                                                           items);
13334                         if (ret)
13335                                 return rte_flow_error_set(error, -ret,
13336                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13337                                         "cannot create GTP PSC item");
13338                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13339                         break;
13340                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13341                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13342                                 /* Create it only the first time to be used. */
13343                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13344                                 if (ret)
13345                                         return rte_flow_error_set
13346                                                 (error, -ret,
13347                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13348                                                 NULL,
13349                                                 "cannot create eCPRI parser");
13350                         }
13351                         flow_dv_translate_item_ecpri(dev, match_mask,
13352                                                      match_value, items);
13353                         /* No other protocol should follow eCPRI layer. */
13354                         last_item = MLX5_FLOW_LAYER_ECPRI;
13355                         break;
13356                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13357                         flow_dv_translate_item_integrity(match_mask,
13358                                                          match_value,
13359                                                          head_item, items);
13360                         break;
13361                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13362                         flow_dv_translate_item_aso_ct(dev, match_mask,
13363                                                       match_value, items);
13364                         break;
13365                 default:
13366                         break;
13367                 }
13368                 item_flags |= last_item;
13369         }
13370         /*
13371          * When E-Switch mode is enabled, we have two cases where we need to
13372          * set the source port manually.
13373          * The first one, is in case of Nic steering rule, and the second is
13374          * E-Switch rule where no port_id item was found. In both cases
13375          * the source port is set according the current port in use.
13376          */
13377         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13378             (priv->representor || priv->master)) {
13379                 if (flow_dv_translate_item_port_id(dev, match_mask,
13380                                                    match_value, NULL, attr))
13381                         return -rte_errno;
13382         }
13383 #ifdef RTE_LIBRTE_MLX5_DEBUG
13384         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13385                                               dev_flow->dv.value.buf));
13386 #endif
13387         /*
13388          * Layers may be already initialized from prefix flow if this dev_flow
13389          * is the suffix flow.
13390          */
13391         handle->layers |= item_flags;
13392         if (action_flags & MLX5_FLOW_ACTION_RSS)
13393                 flow_dv_hashfields_set(dev_flow, rss_desc);
13394         /* If has RSS action in the sample action, the Sample/Mirror resource
13395          * should be registered after the hash filed be update.
13396          */
13397         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13398                 ret = flow_dv_translate_action_sample(dev,
13399                                                       sample,
13400                                                       dev_flow, attr,
13401                                                       &num_of_dest,
13402                                                       sample_actions,
13403                                                       &sample_res,
13404                                                       error);
13405                 if (ret < 0)
13406                         return ret;
13407                 ret = flow_dv_create_action_sample(dev,
13408                                                    dev_flow,
13409                                                    num_of_dest,
13410                                                    &sample_res,
13411                                                    &mdest_res,
13412                                                    sample_actions,
13413                                                    action_flags,
13414                                                    error);
13415                 if (ret < 0)
13416                         return rte_flow_error_set
13417                                                 (error, rte_errno,
13418                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13419                                                 NULL,
13420                                                 "cannot create sample action");
13421                 if (num_of_dest > 1) {
13422                         dev_flow->dv.actions[sample_act_pos] =
13423                         dev_flow->dv.dest_array_res->action;
13424                 } else {
13425                         dev_flow->dv.actions[sample_act_pos] =
13426                         dev_flow->dv.sample_res->verbs_action;
13427                 }
13428         }
13429         /*
13430          * For multiple destination (sample action with ratio=1), the encap
13431          * action and port id action will be combined into group action.
13432          * So need remove the original these actions in the flow and only
13433          * use the sample action instead of.
13434          */
13435         if (num_of_dest > 1 &&
13436             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13437                 int i;
13438                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13439
13440                 for (i = 0; i < actions_n; i++) {
13441                         if ((sample_act->dr_encap_action &&
13442                                 sample_act->dr_encap_action ==
13443                                 dev_flow->dv.actions[i]) ||
13444                                 (sample_act->dr_port_id_action &&
13445                                 sample_act->dr_port_id_action ==
13446                                 dev_flow->dv.actions[i]) ||
13447                                 (sample_act->dr_jump_action &&
13448                                 sample_act->dr_jump_action ==
13449                                 dev_flow->dv.actions[i]))
13450                                 continue;
13451                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13452                 }
13453                 memcpy((void *)dev_flow->dv.actions,
13454                                 (void *)temp_actions,
13455                                 tmp_actions_n * sizeof(void *));
13456                 actions_n = tmp_actions_n;
13457         }
13458         dev_flow->dv.actions_n = actions_n;
13459         dev_flow->act_flags = action_flags;
13460         if (wks->skip_matcher_reg)
13461                 return 0;
13462         /* Register matcher. */
13463         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13464                                     matcher.mask.size);
13465         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13466                                         matcher.priority);
13467         /**
13468          * When creating meter drop flow in drop table, using original
13469          * 5-tuple match, the matcher priority should be lower than
13470          * mtr_id matcher.
13471          */
13472         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13473             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13474             matcher.priority <= MLX5_REG_BITS)
13475                 matcher.priority += MLX5_REG_BITS;
13476         /* reserved field no needs to be set to 0 here. */
13477         tbl_key.is_fdb = attr->transfer;
13478         tbl_key.is_egress = attr->egress;
13479         tbl_key.level = dev_flow->dv.group;
13480         tbl_key.id = dev_flow->dv.table_id;
13481         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13482                                      tunnel, attr->group, error))
13483                 return -rte_errno;
13484         return 0;
13485 }
13486
13487 /**
13488  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13489  * and tunnel.
13490  *
13491  * @param[in, out] action
13492  *   Shred RSS action holding hash RX queue objects.
13493  * @param[in] hash_fields
13494  *   Defines combination of packet fields to participate in RX hash.
13495  * @param[in] tunnel
13496  *   Tunnel type
13497  * @param[in] hrxq_idx
13498  *   Hash RX queue index to set.
13499  *
13500  * @return
13501  *   0 on success, otherwise negative errno value.
13502  */
13503 static int
13504 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13505                               const uint64_t hash_fields,
13506                               uint32_t hrxq_idx)
13507 {
13508         uint32_t *hrxqs = action->hrxq;
13509
13510         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13511         case MLX5_RSS_HASH_IPV4:
13512                 /* fall-through. */
13513         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13514                 /* fall-through. */
13515         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13516                 hrxqs[0] = hrxq_idx;
13517                 return 0;
13518         case MLX5_RSS_HASH_IPV4_TCP:
13519                 /* fall-through. */
13520         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13521                 /* fall-through. */
13522         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13523                 hrxqs[1] = hrxq_idx;
13524                 return 0;
13525         case MLX5_RSS_HASH_IPV4_UDP:
13526                 /* fall-through. */
13527         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13528                 /* fall-through. */
13529         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13530                 hrxqs[2] = hrxq_idx;
13531                 return 0;
13532         case MLX5_RSS_HASH_IPV6:
13533                 /* fall-through. */
13534         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13535                 /* fall-through. */
13536         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13537                 hrxqs[3] = hrxq_idx;
13538                 return 0;
13539         case MLX5_RSS_HASH_IPV6_TCP:
13540                 /* fall-through. */
13541         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13542                 /* fall-through. */
13543         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13544                 hrxqs[4] = hrxq_idx;
13545                 return 0;
13546         case MLX5_RSS_HASH_IPV6_UDP:
13547                 /* fall-through. */
13548         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13549                 /* fall-through. */
13550         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13551                 hrxqs[5] = hrxq_idx;
13552                 return 0;
13553         case MLX5_RSS_HASH_NONE:
13554                 hrxqs[6] = hrxq_idx;
13555                 return 0;
13556         default:
13557                 return -1;
13558         }
13559 }
13560
13561 /**
13562  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
13563  * and tunnel.
13564  *
13565  * @param[in] dev
13566  *   Pointer to the Ethernet device structure.
13567  * @param[in] idx
13568  *   Shared RSS action ID holding hash RX queue objects.
13569  * @param[in] hash_fields
13570  *   Defines combination of packet fields to participate in RX hash.
13571  * @param[in] tunnel
13572  *   Tunnel type
13573  *
13574  * @return
13575  *   Valid hash RX queue index, otherwise 0.
13576  */
13577 static uint32_t
13578 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13579                                  const uint64_t hash_fields)
13580 {
13581         struct mlx5_priv *priv = dev->data->dev_private;
13582         struct mlx5_shared_action_rss *shared_rss =
13583             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13584         const uint32_t *hrxqs = shared_rss->hrxq;
13585
13586         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13587         case MLX5_RSS_HASH_IPV4:
13588                 /* fall-through. */
13589         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13590                 /* fall-through. */
13591         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13592                 return hrxqs[0];
13593         case MLX5_RSS_HASH_IPV4_TCP:
13594                 /* fall-through. */
13595         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13596                 /* fall-through. */
13597         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13598                 return hrxqs[1];
13599         case MLX5_RSS_HASH_IPV4_UDP:
13600                 /* fall-through. */
13601         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13602                 /* fall-through. */
13603         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13604                 return hrxqs[2];
13605         case MLX5_RSS_HASH_IPV6:
13606                 /* fall-through. */
13607         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13608                 /* fall-through. */
13609         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13610                 return hrxqs[3];
13611         case MLX5_RSS_HASH_IPV6_TCP:
13612                 /* fall-through. */
13613         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13614                 /* fall-through. */
13615         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13616                 return hrxqs[4];
13617         case MLX5_RSS_HASH_IPV6_UDP:
13618                 /* fall-through. */
13619         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13620                 /* fall-through. */
13621         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13622                 return hrxqs[5];
13623         case MLX5_RSS_HASH_NONE:
13624                 return hrxqs[6];
13625         default:
13626                 return 0;
13627         }
13628
13629 }
13630
13631 /**
13632  * Apply the flow to the NIC, lock free,
13633  * (mutex should be acquired by caller).
13634  *
13635  * @param[in] dev
13636  *   Pointer to the Ethernet device structure.
13637  * @param[in, out] flow
13638  *   Pointer to flow structure.
13639  * @param[out] error
13640  *   Pointer to error structure.
13641  *
13642  * @return
13643  *   0 on success, a negative errno value otherwise and rte_errno is set.
13644  */
13645 static int
13646 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13647               struct rte_flow_error *error)
13648 {
13649         struct mlx5_flow_dv_workspace *dv;
13650         struct mlx5_flow_handle *dh;
13651         struct mlx5_flow_handle_dv *dv_h;
13652         struct mlx5_flow *dev_flow;
13653         struct mlx5_priv *priv = dev->data->dev_private;
13654         uint32_t handle_idx;
13655         int n;
13656         int err;
13657         int idx;
13658         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13659         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13660         uint8_t misc_mask;
13661
13662         MLX5_ASSERT(wks);
13663         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13664                 dev_flow = &wks->flows[idx];
13665                 dv = &dev_flow->dv;
13666                 dh = dev_flow->handle;
13667                 dv_h = &dh->dvh;
13668                 n = dv->actions_n;
13669                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13670                         if (dv->transfer) {
13671                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13672                                 dv->actions[n++] = priv->sh->dr_drop_action;
13673                         } else {
13674 #ifdef HAVE_MLX5DV_DR
13675                                 /* DR supports drop action placeholder. */
13676                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13677                                 dv->actions[n++] = priv->sh->dr_drop_action;
13678 #else
13679                                 /* For DV we use the explicit drop queue. */
13680                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13681                                 dv->actions[n++] =
13682                                                 priv->drop_queue.hrxq->action;
13683 #endif
13684                         }
13685                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13686                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13687                         struct mlx5_hrxq *hrxq;
13688                         uint32_t hrxq_idx;
13689
13690                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13691                                                     &hrxq_idx);
13692                         if (!hrxq) {
13693                                 rte_flow_error_set
13694                                         (error, rte_errno,
13695                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13696                                          "cannot get hash queue");
13697                                 goto error;
13698                         }
13699                         dh->rix_hrxq = hrxq_idx;
13700                         dv->actions[n++] = hrxq->action;
13701                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13702                         struct mlx5_hrxq *hrxq = NULL;
13703                         uint32_t hrxq_idx;
13704
13705                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13706                                                 rss_desc->shared_rss,
13707                                                 dev_flow->hash_fields);
13708                         if (hrxq_idx)
13709                                 hrxq = mlx5_ipool_get
13710                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13711                                          hrxq_idx);
13712                         if (!hrxq) {
13713                                 rte_flow_error_set
13714                                         (error, rte_errno,
13715                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13716                                          "cannot get hash queue");
13717                                 goto error;
13718                         }
13719                         dh->rix_srss = rss_desc->shared_rss;
13720                         dv->actions[n++] = hrxq->action;
13721                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13722                         if (!priv->sh->default_miss_action) {
13723                                 rte_flow_error_set
13724                                         (error, rte_errno,
13725                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13726                                          "default miss action not be created.");
13727                                 goto error;
13728                         }
13729                         dv->actions[n++] = priv->sh->default_miss_action;
13730                 }
13731                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13732                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13733                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13734                                                (void *)&dv->value, n,
13735                                                dv->actions, &dh->drv_flow);
13736                 if (err) {
13737                         rte_flow_error_set
13738                                 (error, errno,
13739                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13740                                 NULL,
13741                                 (!priv->config.allow_duplicate_pattern &&
13742                                 errno == EEXIST) ?
13743                                 "duplicating pattern is not allowed" :
13744                                 "hardware refuses to create flow");
13745                         goto error;
13746                 }
13747                 if (priv->vmwa_context &&
13748                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13749                         /*
13750                          * The rule contains the VLAN pattern.
13751                          * For VF we are going to create VLAN
13752                          * interface to make hypervisor set correct
13753                          * e-Switch vport context.
13754                          */
13755                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13756                 }
13757         }
13758         return 0;
13759 error:
13760         err = rte_errno; /* Save rte_errno before cleanup. */
13761         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13762                        handle_idx, dh, next) {
13763                 /* hrxq is union, don't clear it if the flag is not set. */
13764                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13765                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13766                         dh->rix_hrxq = 0;
13767                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13768                         dh->rix_srss = 0;
13769                 }
13770                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13771                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13772         }
13773         rte_errno = err; /* Restore rte_errno. */
13774         return -rte_errno;
13775 }
13776
13777 void
13778 flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
13779                           struct mlx5_list_entry *entry)
13780 {
13781         struct mlx5_flow_dv_matcher *resource = container_of(entry,
13782                                                              typeof(*resource),
13783                                                              entry);
13784
13785         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
13786         mlx5_free(resource);
13787 }
13788
13789 /**
13790  * Release the flow matcher.
13791  *
13792  * @param dev
13793  *   Pointer to Ethernet device.
 * @param handle
 *   Pointer to the flow handle owning the matcher.
13796  *
13797  * @return
13798  *   1 while a reference on it exists, 0 when freed.
13799  */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
			struct mlx5_flow_handle *handle)
{
	struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
	/* The matcher's table pointer is embedded in the table data entry. */
	struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
							    typeof(*tbl), tbl);
	int ret;

	MLX5_ASSERT(matcher->matcher_object);
	/* Drop the matcher list reference first... */
	ret = mlx5_list_unregister(tbl->matchers, &matcher->entry);
	/* ...then the table reference taken at matcher registration. */
	flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
	return ret;
}
13814
13815 void
13816 flow_dv_encap_decap_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13817 {
13818         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13819         struct mlx5_flow_dv_encap_decap_resource *res =
13820                                        container_of(entry, typeof(*res), entry);
13821
13822         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13823         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13824 }
13825
13826 /**
13827  * Release an encap/decap resource.
13828  *
13829  * @param dev
13830  *   Pointer to Ethernet device.
13831  * @param encap_decap_idx
13832  *   Index of encap decap resource.
13833  *
13834  * @return
13835  *   1 while a reference on it exists, 0 when freed.
13836  */
13837 static int
13838 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13839                                      uint32_t encap_decap_idx)
13840 {
13841         struct mlx5_priv *priv = dev->data->dev_private;
13842         struct mlx5_flow_dv_encap_decap_resource *resource;
13843
13844         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13845                                   encap_decap_idx);
13846         if (!resource)
13847                 return 0;
13848         MLX5_ASSERT(resource->action);
13849         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
13850 }
13851
13852 /**
 * Release a jump-to-table action resource.
13854  *
13855  * @param dev
13856  *   Pointer to Ethernet device.
13857  * @param rix_jump
13858  *   Index to the jump action resource.
13859  *
13860  * @return
13861  *   1 while a reference on it exists, 0 when freed.
13862  */
13863 static int
13864 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13865                                   uint32_t rix_jump)
13866 {
13867         struct mlx5_priv *priv = dev->data->dev_private;
13868         struct mlx5_flow_tbl_data_entry *tbl_data;
13869
13870         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13871                                   rix_jump);
13872         if (!tbl_data)
13873                 return 0;
13874         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13875 }
13876
13877 void
13878 flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13879 {
13880         struct mlx5_flow_dv_modify_hdr_resource *res =
13881                 container_of(entry, typeof(*res), entry);
13882         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13883
13884         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13885         mlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);
13886 }
13887
13888 /**
13889  * Release a modify-header resource.
13890  *
13891  * @param dev
13892  *   Pointer to Ethernet device.
13893  * @param handle
13894  *   Pointer to mlx5_flow_handle.
13895  *
13896  * @return
13897  *   1 while a reference on it exists, 0 when freed.
13898  */
13899 static int
13900 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13901                                     struct mlx5_flow_handle *handle)
13902 {
13903         struct mlx5_priv *priv = dev->data->dev_private;
13904         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13905
13906         MLX5_ASSERT(entry->action);
13907         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13908 }
13909
13910 void
13911 flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13912 {
13913         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13914         struct mlx5_flow_dv_port_id_action_resource *resource =
13915                                   container_of(entry, typeof(*resource), entry);
13916
13917         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13918         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
13919 }
13920
13921 /**
13922  * Release port ID action resource.
13923  *
13924  * @param dev
13925  *   Pointer to Ethernet device.
 * @param port_id
 *   Index to the port ID action resource.
13928  *
13929  * @return
13930  *   1 while a reference on it exists, 0 when freed.
13931  */
13932 static int
13933 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13934                                         uint32_t port_id)
13935 {
13936         struct mlx5_priv *priv = dev->data->dev_private;
13937         struct mlx5_flow_dv_port_id_action_resource *resource;
13938
13939         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13940         if (!resource)
13941                 return 0;
13942         MLX5_ASSERT(resource->action);
13943         return mlx5_list_unregister(priv->sh->port_id_action_list,
13944                                     &resource->entry);
13945 }
13946
13947 /**
13948  * Release shared RSS action resource.
13949  *
13950  * @param dev
13951  *   Pointer to Ethernet device.
13952  * @param srss
13953  *   Shared RSS action index.
13954  */
13955 static void
13956 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13957 {
13958         struct mlx5_priv *priv = dev->data->dev_private;
13959         struct mlx5_shared_action_rss *shared_rss;
13960
13961         shared_rss = mlx5_ipool_get
13962                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13963         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13964 }
13965
13966 void
13967 flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
13968 {
13969         struct mlx5_dev_ctx_shared *sh = tool_ctx;
13970         struct mlx5_flow_dv_push_vlan_action_resource *resource =
13971                         container_of(entry, typeof(*resource), entry);
13972
13973         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13974         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
13975 }
13976
13977 /**
13978  * Release push vlan action resource.
13979  *
13980  * @param dev
13981  *   Pointer to Ethernet device.
13982  * @param handle
13983  *   Pointer to mlx5_flow_handle.
13984  *
13985  * @return
13986  *   1 while a reference on it exists, 0 when freed.
13987  */
13988 static int
13989 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13990                                           struct mlx5_flow_handle *handle)
13991 {
13992         struct mlx5_priv *priv = dev->data->dev_private;
13993         struct mlx5_flow_dv_push_vlan_action_resource *resource;
13994         uint32_t idx = handle->dvh.rix_push_vlan;
13995
13996         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13997         if (!resource)
13998                 return 0;
13999         MLX5_ASSERT(resource->action);
14000         return mlx5_list_unregister(priv->sh->push_vlan_action_list,
14001                                     &resource->entry);
14002 }
14003
14004 /**
14005  * Release the fate resource.
14006  *
14007  * @param dev
14008  *   Pointer to Ethernet device.
14009  * @param handle
14010  *   Pointer to mlx5_flow_handle.
14011  */
static void
flow_dv_fate_resource_release(struct rte_eth_dev *dev,
			       struct mlx5_flow_handle *handle)
{
	/* rix_fate is a union of indices; zero means no fate resource. */
	if (!handle->rix_fate)
		return;
	switch (handle->fate_action) {
	case MLX5_FLOW_FATE_QUEUE:
		/*
		 * When sample/dest-array is present, the hrxq reference is
		 * owned and released through those resources instead.
		 */
		if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
			mlx5_hrxq_release(dev, handle->rix_hrxq);
		break;
	case MLX5_FLOW_FATE_JUMP:
		flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
		break;
	case MLX5_FLOW_FATE_PORT_ID:
		flow_dv_port_id_action_resource_release(dev,
				handle->rix_port_id_action);
		break;
	default:
		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
		break;
	}
	/* Clear the union so a second call is a no-op. */
	handle->rix_fate = 0;
}
14036
void
flow_dv_sample_remove_cb(void *tool_ctx __rte_unused,
			 struct mlx5_list_entry *entry)
{
	struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
							      typeof(*resource),
							      entry);
	struct rte_eth_dev *dev = resource->dev;
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Destroy the sample action itself before its sub-resources. */
	if (resource->verbs_action)
		claim_zero(mlx5_flow_os_destroy_flow_action
						      (resource->verbs_action));
	/* Drop the reference on the normal (non-sampled) path table. */
	if (resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
					     resource->normal_path_tbl);
	/* Release the sample sub-actions, then recycle the pool element. */
	flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
	DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
}
14057
14058 /**
 * Release a sample resource.
14060  *
14061  * @param dev
14062  *   Pointer to Ethernet device.
14063  * @param handle
14064  *   Pointer to mlx5_flow_handle.
14065  *
14066  * @return
14067  *   1 while a reference on it exists, 0 when freed.
14068  */
14069 static int
14070 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
14071                                      struct mlx5_flow_handle *handle)
14072 {
14073         struct mlx5_priv *priv = dev->data->dev_private;
14074         struct mlx5_flow_dv_sample_resource *resource;
14075
14076         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
14077                                   handle->dvh.rix_sample);
14078         if (!resource)
14079                 return 0;
14080         MLX5_ASSERT(resource->verbs_action);
14081         return mlx5_list_unregister(priv->sh->sample_action_list,
14082                                     &resource->entry);
14083 }
14084
void
flow_dv_dest_array_remove_cb(void *tool_ctx __rte_unused,
			     struct mlx5_list_entry *entry)
{
	struct mlx5_flow_dv_dest_array_resource *resource =
			container_of(entry, typeof(*resource), entry);
	struct rte_eth_dev *dev = resource->dev;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i = 0;

	MLX5_ASSERT(resource->action);
	/* Runtime NULL guard kept: MLX5_ASSERT may be compiled out. */
	if (resource->action)
		claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
	/* Release the sub-action set of every destination. */
	for (; i < resource->num_of_dest; i++)
		flow_dv_sample_sub_actions_release(dev,
						   &resource->sample_idx[i]);
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
	DRV_LOG(DEBUG, "destination array resource %p: removed",
		(void *)resource);
}
14105
14106 /**
 * Release a destination array resource.
14108  *
14109  * @param dev
14110  *   Pointer to Ethernet device.
14111  * @param handle
14112  *   Pointer to mlx5_flow_handle.
14113  *
14114  * @return
14115  *   1 while a reference on it exists, 0 when freed.
14116  */
14117 static int
14118 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
14119                                     struct mlx5_flow_handle *handle)
14120 {
14121         struct mlx5_priv *priv = dev->data->dev_private;
14122         struct mlx5_flow_dv_dest_array_resource *resource;
14123
14124         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14125                                   handle->dvh.rix_dest_array);
14126         if (!resource)
14127                 return 0;
14128         MLX5_ASSERT(resource->action);
14129         return mlx5_list_unregister(priv->sh->dest_array_list,
14130                                     &resource->entry);
14131 }
14132
/*
 * Release one reference on the device-shared GENEVE TLV option resource,
 * destroying the DevX object and freeing it on the last reference.
 */
static void
flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
				sh->geneve_tlv_option_resource;
	/* Spinlock serializes the refcount drop with concurrent creation. */
	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
	if (geneve_opt_resource) {
		if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
					 __ATOMIC_RELAXED))) {
			/* Last reference gone: destroy and clear the slot. */
			claim_zero(mlx5_devx_cmd_destroy
					(geneve_opt_resource->obj));
			mlx5_free(sh->geneve_tlv_option_resource);
			sh->geneve_tlv_option_resource = NULL;
		}
	}
	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
}
14152
14153 /**
14154  * Remove the flow from the NIC but keeps it in memory.
14155  * Lock free, (mutex should be acquired by caller).
14156  *
14157  * @param[in] dev
14158  *   Pointer to Ethernet device.
14159  * @param[in, out] flow
14160  *   Pointer to flow structure.
14161  */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_handle *dh;
	uint32_t handle_idx;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!flow)
		return;
	/* Walk the handle list; links are ipool indices, not pointers. */
	handle_idx = flow->dev_handles;
	while (handle_idx) {
		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				    handle_idx);
		if (!dh)
			return;
		/* Destroy the HW rule but keep the SW handle for re-apply. */
		if (dh->drv_flow) {
			claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
			dh->drv_flow = NULL;
		}
		/* Only queue fate is dropped here; others remain cached. */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
			flow_dv_fate_resource_release(dev, dh);
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
		handle_idx = dh->next.next;
	}
}
14188
14189 /**
14190  * Remove the flow from the NIC and the memory.
14191  * Lock free, (mutex should be acquired by caller).
14192  *
14193  * @param[in] dev
14194  *   Pointer to the Ethernet device structure.
14195  * @param[in, out] flow
14196  *   Pointer to flow structure.
14197  */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_info *fm = NULL;
	uint32_t srss = 0;

	if (!flow)
		return;
	/* Detach from HW first, then release all SW-side resources. */
	flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_free(dev, flow->counter);
		flow->counter = 0;
	}
	if (flow->meter) {
		fm = flow_dv_meter_find_by_idx(priv, flow->meter);
		if (fm)
			mlx5_flow_meter_detach(priv, fm);
		flow->meter = 0;
	}
	/* Keep the current age handling by default. */
	if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
		flow_dv_aso_ct_release(dev, flow->ct);
	else if (flow->age)
		flow_dv_aso_age_release(dev, flow->age);
	if (flow->geneve_tlv_option) {
		flow_dv_geneve_tlv_option_resource_release(dev);
		flow->geneve_tlv_option = 0;
	}
	/* Free each device handle and everything it references. */
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		dev_handle = mlx5_ipool_get(priv->sh->ipool
					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
		if (!dev_handle)
			return;
		flow->dev_handles = dev_handle->next.next;
		if (dev_handle->dvh.matcher)
			flow_dv_matcher_release(dev, dev_handle);
		if (dev_handle->dvh.rix_sample)
			flow_dv_sample_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_dest_array)
			flow_dv_dest_array_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_encap_decap)
			flow_dv_encap_decap_resource_release(dev,
				dev_handle->dvh.rix_encap_decap);
		if (dev_handle->dvh.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_push_vlan)
			flow_dv_push_vlan_action_resource_release(dev,
								  dev_handle);
		if (dev_handle->dvh.rix_tag)
			flow_dv_tag_release(dev,
					    dev_handle->dvh.rix_tag);
		/* Shared RSS release is deferred until all handles freed. */
		if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
			flow_dv_fate_resource_release(dev, dev_handle);
		else if (!srss)
			srss = dev_handle->rix_srss;
		/* split_flow_id belongs to either the meter or RSS pool. */
		if (fm && dev_handle->is_meter_flow_id &&
		    dev_handle->split_flow_id)
			mlx5_ipool_free(fm->flow_ipool,
					dev_handle->split_flow_id);
		else if (dev_handle->split_flow_id &&
		    !dev_handle->is_meter_flow_id)
			mlx5_ipool_free(priv->sh->ipool
					[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
					dev_handle->split_flow_id);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			   tmp_idx);
	}
	if (srss)
		flow_dv_shared_rss_action_release(dev, srss);
}
14272
14273 /**
14274  * Release array of hash RX queue objects.
14275  * Helper function.
14276  *
14277  * @param[in] dev
14278  *   Pointer to the Ethernet device structure.
14279  * @param[in, out] hrxqs
14280  *   Array of hash RX queue objects.
14281  *
14282  * @return
14283  *   Total number of references to hash RX queue objects in *hrxqs* array
14284  *   after this operation.
14285  */
14286 static int
14287 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14288                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14289 {
14290         size_t i;
14291         int remaining = 0;
14292
14293         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14294                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14295
14296                 if (!ret)
14297                         (*hrxqs)[i] = 0;
14298                 remaining += ret;
14299         }
14300         return remaining;
14301 }
14302
14303 /**
14304  * Release all hash RX queue objects representing shared RSS action.
14305  *
14306  * @param[in] dev
14307  *   Pointer to the Ethernet device structure.
14308  * @param[in, out] action
14309  *   Shared RSS action to remove hash RX queue objects from.
14310  *
14311  * @return
14312  *   Total number of references to hash RX queue objects stored in *action*
14313  *   after this operation.
14314  *   Expected to be 0 if no external references held.
14315  */
static int
__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
				 struct mlx5_shared_action_rss *shared_rss)
{
	/* All hrxqs of a shared RSS action live in one fixed-size array. */
	return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
}
14322
14323 /**
14324  * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
14325  * user input.
14326  *
14327  * Only one hash value is available for one L3+L4 combination:
14328  * for example:
14329  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14330  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
14331  * same slot in mlx5_rss_hash_fields.
14332  *
14333  * @param[in] rss
14334  *   Pointer to the shared action RSS conf.
14335  * @param[in, out] hash_field
14336  *   hash_field variable needed to be adjusted.
14337  *
14338  * @return
14339  *   void
14340  */
static void
__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
				     uint64_t *hash_field)
{
	uint64_t rss_types = rss->origin.types;

	/* Match on the L3/L4 slot only; the inner flag is re-added later. */
	switch (*hash_field & ~IBV_RX_HASH_INNER) {
	case MLX5_RSS_HASH_IPV4:
		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
			/* Clear both src/dst bits, then set per user request. */
			*hash_field &= ~MLX5_RSS_HASH_IPV4;
			if (rss_types & ETH_RSS_L3_DST_ONLY)
				*hash_field |= IBV_RX_HASH_DST_IPV4;
			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
				*hash_field |= IBV_RX_HASH_SRC_IPV4;
			else
				*hash_field |= MLX5_RSS_HASH_IPV4;
		}
		return;
	case MLX5_RSS_HASH_IPV6:
		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
			*hash_field &= ~MLX5_RSS_HASH_IPV6;
			if (rss_types & ETH_RSS_L3_DST_ONLY)
				*hash_field |= IBV_RX_HASH_DST_IPV6;
			else if (rss_types & ETH_RSS_L3_SRC_ONLY)
				*hash_field |= IBV_RX_HASH_SRC_IPV6;
			else
				*hash_field |= MLX5_RSS_HASH_IPV6;
		}
		return;
	case MLX5_RSS_HASH_IPV4_UDP:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_UDP:
		if (rss_types & ETH_RSS_UDP) {
			*hash_field &= ~MLX5_UDP_IBV_RX_HASH;
			if (rss_types & ETH_RSS_L4_DST_ONLY)
				*hash_field |= IBV_RX_HASH_DST_PORT_UDP;
			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
				*hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
			else
				*hash_field |= MLX5_UDP_IBV_RX_HASH;
		}
		return;
	case MLX5_RSS_HASH_IPV4_TCP:
		/* fall-through. */
	case MLX5_RSS_HASH_IPV6_TCP:
		if (rss_types & ETH_RSS_TCP) {
			*hash_field &= ~MLX5_TCP_IBV_RX_HASH;
			if (rss_types & ETH_RSS_L4_DST_ONLY)
				*hash_field |= IBV_RX_HASH_DST_PORT_TCP;
			else if (rss_types & ETH_RSS_L4_SRC_ONLY)
				*hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
			else
				*hash_field |= MLX5_TCP_IBV_RX_HASH;
		}
		return;
	default:
		return;
	}
}
14400
14401 /**
14402  * Setup shared RSS action.
14403  * Prepare set of hash RX queue objects sufficient to handle all valid
14404  * hash_fields combinations (see enum ibv_rx_hash_fields).
14405  *
14406  * @param[in] dev
14407  *   Pointer to the Ethernet device structure.
14408  * @param[in] action_idx
14409  *   Shared RSS action ipool index.
14410  * @param[in, out] action
14411  *   Partially initialized shared RSS action.
14412  * @param[out] error
14413  *   Perform verbose error reporting if not NULL. Initialized in case of
14414  *   error only.
14415  *
14416  * @return
14417  *   0 on success, otherwise negative errno value.
14418  */
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
			   uint32_t action_idx,
			   struct mlx5_shared_action_rss *shared_rss,
			   struct rte_flow_error *error)
{
	struct mlx5_flow_rss_desc rss_desc = { 0 };
	size_t i;
	int err;

	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot setup indirection table");
	}
	memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
	rss_desc.const_q = shared_rss->origin.queue;
	rss_desc.queue_num = shared_rss->origin.queue_num;
	/* Set non-zero value to indicate a shared RSS. */
	rss_desc.shared_rss = action_idx;
	rss_desc.ind_tbl = shared_rss->ind_tbl;
	/* Pre-create one hrxq per supported hash-fields combination. */
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		uint32_t hrxq_idx;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel = 0;

		__flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
		/* Level > 1 means hashing on the inner packet headers. */
		if (shared_rss->origin.level > 1) {
			hash_fields |= IBV_RX_HASH_INNER;
			tunnel = 1;
		}
		rss_desc.tunnel = tunnel;
		rss_desc.hash_fields = hash_fields;
		hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
		if (!hrxq_idx) {
			rte_flow_error_set
				(error, rte_errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "cannot get hash queue");
			goto error_hrxq_new;
		}
		err = __flow_dv_action_rss_hrxq_set
			(shared_rss, hash_fields, hrxq_idx);
		MLX5_ASSERT(!err);
	}
	return 0;
error_hrxq_new:
	/* Save rte_errno: the release helpers below may overwrite it. */
	err = rte_errno;
	__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
	if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
		shared_rss->ind_tbl = NULL;
	rte_errno = err;
	return -rte_errno;
}
14474
14475 /**
14476  * Create shared RSS action.
14477  *
14478  * @param[in] dev
14479  *   Pointer to the Ethernet device structure.
14480  * @param[in] conf
14481  *   Shared action configuration.
14482  * @param[in] rss
14483  *   RSS action specification used to create shared action.
14484  * @param[out] error
14485  *   Perform verbose error reporting if not NULL. Initialized in case of
14486  *   error only.
14487  *
14488  * @return
14489  *   A valid shared action ID in case of success, 0 otherwise and
14490  *   rte_errno is set.
14491  */
14492 static uint32_t
14493 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14494                             const struct rte_flow_indir_action_conf *conf,
14495                             const struct rte_flow_action_rss *rss,
14496                             struct rte_flow_error *error)
14497 {
14498         struct mlx5_priv *priv = dev->data->dev_private;
14499         struct mlx5_shared_action_rss *shared_rss = NULL;
14500         void *queue = NULL;
14501         struct rte_flow_action_rss *origin;
14502         const uint8_t *rss_key;
14503         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14504         uint32_t idx;
14505
14506         RTE_SET_USED(conf);
14507         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14508                             0, SOCKET_ID_ANY);
14509         shared_rss = mlx5_ipool_zmalloc
14510                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14511         if (!shared_rss || !queue) {
14512                 rte_flow_error_set(error, ENOMEM,
14513                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14514                                    "cannot allocate resource memory");
14515                 goto error_rss_init;
14516         }
14517         if (idx > (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14518                 rte_flow_error_set(error, E2BIG,
14519                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14520                                    "rss action number out of range");
14521                 goto error_rss_init;
14522         }
14523         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14524                                           sizeof(*shared_rss->ind_tbl),
14525                                           0, SOCKET_ID_ANY);
14526         if (!shared_rss->ind_tbl) {
14527                 rte_flow_error_set(error, ENOMEM,
14528                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14529                                    "cannot allocate resource memory");
14530                 goto error_rss_init;
14531         }
14532         memcpy(queue, rss->queue, queue_size);
14533         shared_rss->ind_tbl->queues = queue;
14534         shared_rss->ind_tbl->queues_n = rss->queue_num;
14535         origin = &shared_rss->origin;
14536         origin->func = rss->func;
14537         origin->level = rss->level;
14538         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14539         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14540         /* NULL RSS key indicates default RSS key. */
14541         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14542         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14543         origin->key = &shared_rss->key[0];
14544         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14545         origin->queue = queue;
14546         origin->queue_num = rss->queue_num;
14547         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14548                 goto error_rss_init;
14549         rte_spinlock_init(&shared_rss->action_rss_sl);
14550         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14551         rte_spinlock_lock(&priv->shared_act_sl);
14552         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14553                      &priv->rss_shared_actions, idx, shared_rss, next);
14554         rte_spinlock_unlock(&priv->shared_act_sl);
14555         return idx;
14556 error_rss_init:
14557         if (shared_rss) {
14558                 if (shared_rss->ind_tbl)
14559                         mlx5_free(shared_rss->ind_tbl);
14560                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14561                                 idx);
14562         }
14563         if (queue)
14564                 mlx5_free(queue);
14565         return 0;
14566 }
14567
14568 /**
14569  * Destroy the shared RSS action.
14570  * Release related hash RX queue objects.
14571  *
14572  * @param[in] dev
14573  *   Pointer to the Ethernet device structure.
14574  * @param[in] idx
14575  *   The shared RSS action object ID to be removed.
14576  * @param[out] error
14577  *   Perform verbose error reporting if not NULL. Initialized in case of
14578  *   error only.
14579  *
14580  * @return
14581  *   0 on success, otherwise negative errno value.
14582  */
14583 static int
14584 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14585                              struct rte_flow_error *error)
14586 {
14587         struct mlx5_priv *priv = dev->data->dev_private;
14588         struct mlx5_shared_action_rss *shared_rss =
14589             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14590         uint32_t old_refcnt = 1;
14591         int remaining;
14592         uint16_t *queue = NULL;
14593
14594         if (!shared_rss)
14595                 return rte_flow_error_set(error, EINVAL,
14596                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14597                                           "invalid shared action");
14598         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14599         if (remaining)
14600                 return rte_flow_error_set(error, EBUSY,
14601                                           RTE_FLOW_ERROR_TYPE_ACTION,
14602                                           NULL,
14603                                           "shared rss hrxq has references");
14604         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14605                                          0, 0, __ATOMIC_ACQUIRE,
14606                                          __ATOMIC_RELAXED))
14607                 return rte_flow_error_set(error, EBUSY,
14608                                           RTE_FLOW_ERROR_TYPE_ACTION,
14609                                           NULL,
14610                                           "shared rss has references");
14611         queue = shared_rss->ind_tbl->queues;
14612         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14613         if (remaining)
14614                 return rte_flow_error_set(error, EBUSY,
14615                                           RTE_FLOW_ERROR_TYPE_ACTION,
14616                                           NULL,
14617                                           "shared rss indirection table has"
14618                                           " references");
14619         mlx5_free(queue);
14620         rte_spinlock_lock(&priv->shared_act_sl);
14621         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14622                      &priv->rss_shared_actions, idx, shared_rss, next);
14623         rte_spinlock_unlock(&priv->shared_act_sl);
14624         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14625                         idx);
14626         return 0;
14627 }
14628
14629 /**
14630  * Create indirect action, lock free,
14631  * (mutex should be acquired by caller).
14632  * Dispatcher for action type specific call.
14633  *
14634  * @param[in] dev
14635  *   Pointer to the Ethernet device structure.
14636  * @param[in] conf
14637  *   Shared action configuration.
14638  * @param[in] action
14639  *   Action specification used to create indirect action.
14640  * @param[out] error
14641  *   Perform verbose error reporting if not NULL. Initialized in case of
14642  *   error only.
14643  *
14644  * @return
14645  *   A valid shared action handle in case of success, NULL otherwise and
14646  *   rte_errno is set.
14647  */
14648 static struct rte_flow_action_handle *
14649 flow_dv_action_create(struct rte_eth_dev *dev,
14650                       const struct rte_flow_indir_action_conf *conf,
14651                       const struct rte_flow_action *action,
14652                       struct rte_flow_error *err)
14653 {
14654         struct mlx5_priv *priv = dev->data->dev_private;
14655         uint32_t age_idx = 0;
14656         uint32_t idx = 0;
14657         uint32_t ret = 0;
14658
14659         switch (action->type) {
14660         case RTE_FLOW_ACTION_TYPE_RSS:
14661                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14662                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14663                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14664                 break;
14665         case RTE_FLOW_ACTION_TYPE_AGE:
14666                 age_idx = flow_dv_aso_age_alloc(dev, err);
14667                 if (!age_idx) {
14668                         ret = -rte_errno;
14669                         break;
14670                 }
14671                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14672                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
14673                 flow_dv_aso_age_params_init(dev, age_idx,
14674                                         ((const struct rte_flow_action_age *)
14675                                                 action->conf)->context ?
14676                                         ((const struct rte_flow_action_age *)
14677                                                 action->conf)->context :
14678                                         (void *)(uintptr_t)idx,
14679                                         ((const struct rte_flow_action_age *)
14680                                                 action->conf)->timeout);
14681                 ret = age_idx;
14682                 break;
14683         case RTE_FLOW_ACTION_TYPE_COUNT:
14684                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14685                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14686                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14687                 break;
14688         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14689                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14690                                                          err);
14691                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14692                 break;
14693         default:
14694                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14695                                    NULL, "action type not supported");
14696                 break;
14697         }
14698         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14699 }
14700
14701 /**
14702  * Destroy the indirect action.
14703  * Release action related resources on the NIC and the memory.
14704  * Lock free, (mutex should be acquired by caller).
14705  * Dispatcher for action type specific call.
14706  *
14707  * @param[in] dev
14708  *   Pointer to the Ethernet device structure.
14709  * @param[in] handle
14710  *   The indirect action object handle to be removed.
14711  * @param[out] error
14712  *   Perform verbose error reporting if not NULL. Initialized in case of
14713  *   error only.
14714  *
14715  * @return
14716  *   0 on success, otherwise negative errno value.
14717  */
14718 static int
14719 flow_dv_action_destroy(struct rte_eth_dev *dev,
14720                        struct rte_flow_action_handle *handle,
14721                        struct rte_flow_error *error)
14722 {
14723         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14724         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14725         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14726         struct mlx5_flow_counter *cnt;
14727         uint32_t no_flow_refcnt = 1;
14728         int ret;
14729
14730         switch (type) {
14731         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14732                 return __flow_dv_action_rss_release(dev, idx, error);
14733         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14734                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14735                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14736                                                  &no_flow_refcnt, 1, false,
14737                                                  __ATOMIC_ACQUIRE,
14738                                                  __ATOMIC_RELAXED))
14739                         return rte_flow_error_set(error, EBUSY,
14740                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14741                                                   NULL,
14742                                                   "Indirect count action has references");
14743                 flow_dv_counter_free(dev, idx);
14744                 return 0;
14745         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14746                 ret = flow_dv_aso_age_release(dev, idx);
14747                 if (ret)
14748                         /*
14749                          * In this case, the last flow has a reference will
14750                          * actually release the age action.
14751                          */
14752                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14753                                 " released with references %d.", idx, ret);
14754                 return 0;
14755         case MLX5_INDIRECT_ACTION_TYPE_CT:
14756                 ret = flow_dv_aso_ct_release(dev, idx);
14757                 if (ret < 0)
14758                         return ret;
14759                 if (ret > 0)
14760                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14761                                 "has references %d.", idx, ret);
14762                 return 0;
14763         default:
14764                 return rte_flow_error_set(error, ENOTSUP,
14765                                           RTE_FLOW_ERROR_TYPE_ACTION,
14766                                           NULL,
14767                                           "action type not supported");
14768         }
14769 }
14770
14771 /**
14772  * Updates in place shared RSS action configuration.
14773  *
14774  * @param[in] dev
14775  *   Pointer to the Ethernet device structure.
14776  * @param[in] idx
14777  *   The shared RSS action object ID to be updated.
14778  * @param[in] action_conf
14779  *   RSS action specification used to modify *shared_rss*.
14780  * @param[out] error
14781  *   Perform verbose error reporting if not NULL. Initialized in case of
14782  *   error only.
14783  *
14784  * @return
14785  *   0 on success, otherwise negative errno value.
14786  * @note: currently only support update of RSS queues.
14787  */
14788 static int
14789 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14790                             const struct rte_flow_action_rss *action_conf,
14791                             struct rte_flow_error *error)
14792 {
14793         struct mlx5_priv *priv = dev->data->dev_private;
14794         struct mlx5_shared_action_rss *shared_rss =
14795             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14796         int ret = 0;
14797         void *queue = NULL;
14798         uint16_t *queue_old = NULL;
14799         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
14800
14801         if (!shared_rss)
14802                 return rte_flow_error_set(error, EINVAL,
14803                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14804                                           "invalid shared action to update");
14805         if (priv->obj_ops.ind_table_modify == NULL)
14806                 return rte_flow_error_set(error, ENOTSUP,
14807                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14808                                           "cannot modify indirection table");
14809         queue = mlx5_malloc(MLX5_MEM_ZERO,
14810                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14811                             0, SOCKET_ID_ANY);
14812         if (!queue)
14813                 return rte_flow_error_set(error, ENOMEM,
14814                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14815                                           NULL,
14816                                           "cannot allocate resource memory");
14817         memcpy(queue, action_conf->queue, queue_size);
14818         MLX5_ASSERT(shared_rss->ind_tbl);
14819         rte_spinlock_lock(&shared_rss->action_rss_sl);
14820         queue_old = shared_rss->ind_tbl->queues;
14821         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
14822                                         queue, action_conf->queue_num, true);
14823         if (ret) {
14824                 mlx5_free(queue);
14825                 ret = rte_flow_error_set(error, rte_errno,
14826                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14827                                           "cannot update indirection table");
14828         } else {
14829                 mlx5_free(queue_old);
14830                 shared_rss->origin.queue = queue;
14831                 shared_rss->origin.queue_num = action_conf->queue_num;
14832         }
14833         rte_spinlock_unlock(&shared_rss->action_rss_sl);
14834         return ret;
14835 }
14836
14837 /*
14838  * Updates in place conntrack context or direction.
14839  * Context update should be synchronized.
14840  *
14841  * @param[in] dev
14842  *   Pointer to the Ethernet device structure.
14843  * @param[in] idx
14844  *   The conntrack object ID to be updated.
14845  * @param[in] update
14846  *   Pointer to the structure of information to update.
14847  * @param[out] error
14848  *   Perform verbose error reporting if not NULL. Initialized in case of
14849  *   error only.
14850  *
14851  * @return
14852  *   0 on success, otherwise negative errno value.
14853  */
14854 static int
14855 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14856                            const struct rte_flow_modify_conntrack *update,
14857                            struct rte_flow_error *error)
14858 {
14859         struct mlx5_priv *priv = dev->data->dev_private;
14860         struct mlx5_aso_ct_action *ct;
14861         const struct rte_flow_action_conntrack *new_prf;
14862         int ret = 0;
14863         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14864         uint32_t dev_idx;
14865
14866         if (PORT_ID(priv) != owner)
14867                 return rte_flow_error_set(error, EACCES,
14868                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14869                                           NULL,
14870                                           "CT object owned by another port");
14871         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14872         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14873         if (!ct->refcnt)
14874                 return rte_flow_error_set(error, ENOMEM,
14875                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14876                                           NULL,
14877                                           "CT object is inactive");
14878         new_prf = &update->new_ct;
14879         if (update->direction)
14880                 ct->is_original = !!new_prf->is_original_dir;
14881         if (update->state) {
14882                 /* Only validate the profile when it needs to be updated. */
14883                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14884                 if (ret)
14885                         return ret;
14886                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14887                 if (ret)
14888                         return rte_flow_error_set(error, EIO,
14889                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14890                                         NULL,
14891                                         "Failed to send CT context update WQE");
14892                 /* Block until ready or a failure. */
14893                 ret = mlx5_aso_ct_available(priv->sh, ct);
14894                 if (ret)
14895                         rte_flow_error_set(error, rte_errno,
14896                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14897                                            NULL,
14898                                            "Timeout to get the CT update");
14899         }
14900         return ret;
14901 }
14902
14903 /**
14904  * Updates in place shared action configuration, lock free,
14905  * (mutex should be acquired by caller).
14906  *
14907  * @param[in] dev
14908  *   Pointer to the Ethernet device structure.
14909  * @param[in] handle
14910  *   The indirect action object handle to be updated.
14911  * @param[in] update
14912  *   Action specification used to modify the action pointed by *handle*.
14913  *   *update* could be of same type with the action pointed by the *handle*
14914  *   handle argument, or some other structures like a wrapper, depending on
14915  *   the indirect action type.
14916  * @param[out] error
14917  *   Perform verbose error reporting if not NULL. Initialized in case of
14918  *   error only.
14919  *
14920  * @return
14921  *   0 on success, otherwise negative errno value.
14922  */
14923 static int
14924 flow_dv_action_update(struct rte_eth_dev *dev,
14925                         struct rte_flow_action_handle *handle,
14926                         const void *update,
14927                         struct rte_flow_error *err)
14928 {
14929         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14930         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14931         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14932         const void *action_conf;
14933
14934         switch (type) {
14935         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14936                 action_conf = ((const struct rte_flow_action *)update)->conf;
14937                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14938         case MLX5_INDIRECT_ACTION_TYPE_CT:
14939                 return __flow_dv_action_ct_update(dev, idx, update, err);
14940         default:
14941                 return rte_flow_error_set(err, ENOTSUP,
14942                                           RTE_FLOW_ERROR_TYPE_ACTION,
14943                                           NULL,
14944                                           "action type update not supported");
14945         }
14946 }
14947
/**
 * Destroy the meter sub policy table rules.
 * Lock free, (mutex should be acquired by caller).
 *
 * Tears down, per color: the color rules and their matchers (detaching a
 * chained next-meter for the GREEN color if the fate is MTR), then the
 * per-color hash RX queues and jump tables, and finally the sub-policy
 * table resource itself.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] sub_policy
 *   Pointer to meter sub policy table.
 */
static void
__flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
			     struct mlx5_flow_meter_sub_policy *sub_policy)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_tbl_data_entry *tbl;
	struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
	struct mlx5_flow_meter_info *next_fm;
	struct mlx5_sub_policy_color_rule *color_rule;
	void *tmp;
	uint32_t i;

	for (i = 0; i < RTE_COLORS; i++) {
		next_fm = NULL;
		/*
		 * A GREEN color whose fate is MTR chains to a next meter;
		 * look it up so each destroyed rule can drop its reference.
		 */
		if (i == RTE_COLOR_GREEN && policy &&
		    policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
			next_fm = mlx5_flow_meter_find(priv,
					policy->act_cnt[i].next_mtr_id, NULL);
		/* Safe iteration: each entry is removed and freed inside. */
		TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
				   next_port, tmp) {
			claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
			/* Matcher entry lives in the table's matcher list. */
			tbl = container_of(color_rule->matcher->tbl,
					typeof(*tbl), tbl);
			mlx5_list_unregister(tbl->matchers,
						&color_rule->matcher->entry);
			TAILQ_REMOVE(&sub_policy->color_rules[i],
					color_rule, next_port);
			mlx5_free(color_rule);
			if (next_fm)
				mlx5_flow_meter_detach(priv, next_fm);
		}
	}
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (sub_policy->rix_hrxq[i]) {
			/*
			 * Hierarchy policies share hrxq references owned
			 * elsewhere; only release for non-hierarchy ones.
			 */
			if (policy && !policy->is_hierarchy)
				mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
			sub_policy->rix_hrxq[i] = 0;
		}
		if (sub_policy->jump_tbl[i]) {
			flow_dv_tbl_resource_release(MLX5_SH(dev),
			sub_policy->jump_tbl[i]);
			sub_policy->jump_tbl[i] = NULL;
		}
	}
	/* Finally drop the sub-policy's own table resource. */
	if (sub_policy->tbl_rsc) {
		flow_dv_tbl_resource_release(MLX5_SH(dev),
			sub_policy->tbl_rsc);
		sub_policy->tbl_rsc = NULL;
	}
}
15007
15008 /**
15009  * Destroy policy rules, lock free,
15010  * (mutex should be acquired by caller).
15011  * Dispatcher for action type specific call.
15012  *
15013  * @param[in] dev
15014  *   Pointer to the Ethernet device structure.
15015  * @param[in] mtr_policy
15016  *   Meter policy struct.
15017  */
15018 static void
15019 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
15020                       struct mlx5_flow_meter_policy *mtr_policy)
15021 {
15022         uint32_t i, j;
15023         struct mlx5_flow_meter_sub_policy *sub_policy;
15024         uint16_t sub_policy_num;
15025
15026         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15027                 sub_policy_num = (mtr_policy->sub_policy_num >>
15028                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15029                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15030                 for (j = 0; j < sub_policy_num; j++) {
15031                         sub_policy = mtr_policy->sub_policys[i][j];
15032                         if (sub_policy)
15033                                 __flow_dv_destroy_sub_policy_rules
15034                                                 (dev, sub_policy);
15035                 }
15036         }
15037 }
15038
15039 /**
15040  * Destroy policy action, lock free,
15041  * (mutex should be acquired by caller).
15042  * Dispatcher for action type specific call.
15043  *
15044  * @param[in] dev
15045  *   Pointer to the Ethernet device structure.
15046  * @param[in] mtr_policy
15047  *   Meter policy struct.
15048  */
15049 static void
15050 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
15051                       struct mlx5_flow_meter_policy *mtr_policy)
15052 {
15053         struct rte_flow_action *rss_action;
15054         struct mlx5_flow_handle dev_handle;
15055         uint32_t i, j;
15056
15057         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
15058                 if (mtr_policy->act_cnt[i].rix_mark) {
15059                         flow_dv_tag_release(dev,
15060                                 mtr_policy->act_cnt[i].rix_mark);
15061                         mtr_policy->act_cnt[i].rix_mark = 0;
15062                 }
15063                 if (mtr_policy->act_cnt[i].modify_hdr) {
15064                         dev_handle.dvh.modify_hdr =
15065                                 mtr_policy->act_cnt[i].modify_hdr;
15066                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
15067                 }
15068                 switch (mtr_policy->act_cnt[i].fate_action) {
15069                 case MLX5_FLOW_FATE_SHARED_RSS:
15070                         rss_action = mtr_policy->act_cnt[i].rss;
15071                         mlx5_free(rss_action);
15072                         break;
15073                 case MLX5_FLOW_FATE_PORT_ID:
15074                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
15075                                 flow_dv_port_id_action_resource_release(dev,
15076                                 mtr_policy->act_cnt[i].rix_port_id_action);
15077                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
15078                         }
15079                         break;
15080                 case MLX5_FLOW_FATE_DROP:
15081                 case MLX5_FLOW_FATE_JUMP:
15082                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15083                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
15084                                                 NULL;
15085                         break;
15086                 default:
15087                         /*Queue action do nothing*/
15088                         break;
15089                 }
15090         }
15091         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
15092                 mtr_policy->dr_drop_action[j] = NULL;
15093 }
15094
15095 /**
15096  * Create policy action per domain, lock free,
15097  * (mutex should be acquired by caller).
15098  * Dispatcher for action type specific call.
15099  *
15100  * @param[in] dev
15101  *   Pointer to the Ethernet device structure.
15102  * @param[in] mtr_policy
15103  *   Meter policy struct.
15104  * @param[in] action
15105  *   Action specification used to create meter actions.
15106  * @param[out] error
15107  *   Perform verbose error reporting if not NULL. Initialized in case of
15108  *   error only.
15109  *
15110  * @return
15111  *   0 on success, otherwise negative errno value.
15112  */
15113 static int
15114 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
15115                         struct mlx5_flow_meter_policy *mtr_policy,
15116                         const struct rte_flow_action *actions[RTE_COLORS],
15117                         enum mlx5_meter_domain domain,
15118                         struct rte_mtr_error *error)
15119 {
15120         struct mlx5_priv *priv = dev->data->dev_private;
15121         struct rte_flow_error flow_err;
15122         const struct rte_flow_action *act;
15123         uint64_t action_flags = 0;
15124         struct mlx5_flow_handle dh;
15125         struct mlx5_flow dev_flow;
15126         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15127         int i, ret;
15128         uint8_t egress, transfer;
15129         struct mlx5_meter_policy_action_container *act_cnt = NULL;
15130         union {
15131                 struct mlx5_flow_dv_modify_hdr_resource res;
15132                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15133                             sizeof(struct mlx5_modification_cmd) *
15134                             (MLX5_MAX_MODIFY_NUM + 1)];
15135         } mhdr_dummy;
15136         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15137
15138         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15139         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15140         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15141         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15142         memset(&port_id_action, 0,
15143                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
15144         memset(mhdr_res, 0, sizeof(*mhdr_res));
15145         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15146                                         egress ?
15147                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15148                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
15149         dev_flow.handle = &dh;
15150         dev_flow.dv.port_id_action = &port_id_action;
15151         dev_flow.external = true;
15152         for (i = 0; i < RTE_COLORS; i++) {
15153                 if (i < MLX5_MTR_RTE_COLORS)
15154                         act_cnt = &mtr_policy->act_cnt[i];
15155                 for (act = actions[i];
15156                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
15157                         act++) {
15158                         switch (act->type) {
15159                         case RTE_FLOW_ACTION_TYPE_MARK:
15160                         {
15161                                 uint32_t tag_be = mlx5_flow_mark_set
15162                                         (((const struct rte_flow_action_mark *)
15163                                         (act->conf))->id);
15164
15165                                 if (i >= MLX5_MTR_RTE_COLORS)
15166                                         return -rte_mtr_error_set(error,
15167                                           ENOTSUP,
15168                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15169                                           NULL,
15170                                           "cannot create policy "
15171                                           "mark action for this color");
15172                                 dev_flow.handle->mark = 1;
15173                                 if (flow_dv_tag_resource_register(dev, tag_be,
15174                                                   &dev_flow, &flow_err))
15175                                         return -rte_mtr_error_set(error,
15176                                         ENOTSUP,
15177                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15178                                         NULL,
15179                                         "cannot setup policy mark action");
15180                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15181                                 act_cnt->rix_mark =
15182                                         dev_flow.handle->dvh.rix_tag;
15183                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15184                                 break;
15185                         }
15186                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15187                                 if (i >= MLX5_MTR_RTE_COLORS)
15188                                         return -rte_mtr_error_set(error,
15189                                           ENOTSUP,
15190                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15191                                           NULL,
15192                                           "cannot create policy "
15193                                           "set tag action for this color");
15194                                 if (flow_dv_convert_action_set_tag
15195                                 (dev, mhdr_res,
15196                                 (const struct rte_flow_action_set_tag *)
15197                                 act->conf,  &flow_err))
15198                                         return -rte_mtr_error_set(error,
15199                                         ENOTSUP,
15200                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15201                                         NULL, "cannot convert policy "
15202                                         "set tag action");
15203                                 if (!mhdr_res->actions_num)
15204                                         return -rte_mtr_error_set(error,
15205                                         ENOTSUP,
15206                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15207                                         NULL, "cannot find policy "
15208                                         "set tag action");
15209                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15210                                 break;
15211                         case RTE_FLOW_ACTION_TYPE_DROP:
15212                         {
15213                                 struct mlx5_flow_mtr_mng *mtrmng =
15214                                                 priv->sh->mtrmng;
15215                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15216
15217                                 /*
15218                                  * Create the drop table with
15219                                  * METER DROP level.
15220                                  */
15221                                 if (!mtrmng->drop_tbl[domain]) {
15222                                         mtrmng->drop_tbl[domain] =
15223                                         flow_dv_tbl_resource_get(dev,
15224                                         MLX5_FLOW_TABLE_LEVEL_METER,
15225                                         egress, transfer, false, NULL, 0,
15226                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15227                                         if (!mtrmng->drop_tbl[domain])
15228                                                 return -rte_mtr_error_set
15229                                         (error, ENOTSUP,
15230                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15231                                         NULL,
15232                                         "Failed to create meter drop table");
15233                                 }
15234                                 tbl_data = container_of
15235                                 (mtrmng->drop_tbl[domain],
15236                                 struct mlx5_flow_tbl_data_entry, tbl);
15237                                 if (i < MLX5_MTR_RTE_COLORS) {
15238                                         act_cnt->dr_jump_action[domain] =
15239                                                 tbl_data->jump.action;
15240                                         act_cnt->fate_action =
15241                                                 MLX5_FLOW_FATE_DROP;
15242                                 }
15243                                 if (i == RTE_COLOR_RED)
15244                                         mtr_policy->dr_drop_action[domain] =
15245                                                 tbl_data->jump.action;
15246                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15247                                 break;
15248                         }
15249                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15250                         {
15251                                 if (i >= MLX5_MTR_RTE_COLORS)
15252                                         return -rte_mtr_error_set(error,
15253                                         ENOTSUP,
15254                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15255                                         NULL, "cannot create policy "
15256                                         "fate queue for this color");
15257                                 act_cnt->queue =
15258                                 ((const struct rte_flow_action_queue *)
15259                                         (act->conf))->index;
15260                                 act_cnt->fate_action =
15261                                         MLX5_FLOW_FATE_QUEUE;
15262                                 dev_flow.handle->fate_action =
15263                                         MLX5_FLOW_FATE_QUEUE;
15264                                 mtr_policy->is_queue = 1;
15265                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15266                                 break;
15267                         }
15268                         case RTE_FLOW_ACTION_TYPE_RSS:
15269                         {
15270                                 int rss_size;
15271
15272                                 if (i >= MLX5_MTR_RTE_COLORS)
15273                                         return -rte_mtr_error_set(error,
15274                                           ENOTSUP,
15275                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15276                                           NULL,
15277                                           "cannot create policy "
15278                                           "rss action for this color");
15279                                 /*
15280                                  * Save RSS conf into policy struct
15281                                  * for translate stage.
15282                                  */
15283                                 rss_size = (int)rte_flow_conv
15284                                         (RTE_FLOW_CONV_OP_ACTION,
15285                                         NULL, 0, act, &flow_err);
15286                                 if (rss_size <= 0)
15287                                         return -rte_mtr_error_set(error,
15288                                           ENOTSUP,
15289                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15290                                           NULL, "Get the wrong "
15291                                           "rss action struct size");
15292                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15293                                                 rss_size, 0, SOCKET_ID_ANY);
15294                                 if (!act_cnt->rss)
15295                                         return -rte_mtr_error_set(error,
15296                                           ENOTSUP,
15297                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15298                                           NULL,
15299                                           "Fail to malloc rss action memory");
15300                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15301                                         act_cnt->rss, rss_size,
15302                                         act, &flow_err);
15303                                 if (ret < 0)
15304                                         return -rte_mtr_error_set(error,
15305                                           ENOTSUP,
15306                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15307                                           NULL, "Fail to save "
15308                                           "rss action into policy struct");
15309                                 act_cnt->fate_action =
15310                                         MLX5_FLOW_FATE_SHARED_RSS;
15311                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15312                                 break;
15313                         }
15314                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
15315                         {
15316                                 struct mlx5_flow_dv_port_id_action_resource
15317                                         port_id_resource;
15318                                 uint32_t port_id = 0;
15319
15320                                 if (i >= MLX5_MTR_RTE_COLORS)
15321                                         return -rte_mtr_error_set(error,
15322                                         ENOTSUP,
15323                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15324                                         NULL, "cannot create policy "
15325                                         "port action for this color");
15326                                 memset(&port_id_resource, 0,
15327                                         sizeof(port_id_resource));
15328                                 if (flow_dv_translate_action_port_id(dev, act,
15329                                                 &port_id, &flow_err))
15330                                         return -rte_mtr_error_set(error,
15331                                         ENOTSUP,
15332                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15333                                         NULL, "cannot translate "
15334                                         "policy port action");
15335                                 port_id_resource.port_id = port_id;
15336                                 if (flow_dv_port_id_action_resource_register
15337                                         (dev, &port_id_resource,
15338                                         &dev_flow, &flow_err))
15339                                         return -rte_mtr_error_set(error,
15340                                         ENOTSUP,
15341                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15342                                         NULL, "cannot setup "
15343                                         "policy port action");
15344                                 act_cnt->rix_port_id_action =
15345                                         dev_flow.handle->rix_port_id_action;
15346                                 act_cnt->fate_action =
15347                                         MLX5_FLOW_FATE_PORT_ID;
15348                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
15349                                 break;
15350                         }
15351                         case RTE_FLOW_ACTION_TYPE_JUMP:
15352                         {
15353                                 uint32_t jump_group = 0;
15354                                 uint32_t table = 0;
15355                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15356                                 struct flow_grp_info grp_info = {
15357                                         .external = !!dev_flow.external,
15358                                         .transfer = !!transfer,
15359                                         .fdb_def_rule = !!priv->fdb_def_rule,
15360                                         .std_tbl_fix = 0,
15361                                         .skip_scale = dev_flow.skip_scale &
15362                                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
15363                                 };
15364                                 struct mlx5_flow_meter_sub_policy *sub_policy =
15365                                 mtr_policy->sub_policys[domain][0];
15366
15367                                 if (i >= MLX5_MTR_RTE_COLORS)
15368                                         return -rte_mtr_error_set(error,
15369                                           ENOTSUP,
15370                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15371                                           NULL,
15372                                           "cannot create policy "
15373                                           "jump action for this color");
15374                                 jump_group =
15375                                 ((const struct rte_flow_action_jump *)
15376                                                         act->conf)->group;
15377                                 if (mlx5_flow_group_to_table(dev, NULL,
15378                                                        jump_group,
15379                                                        &table,
15380                                                        &grp_info, &flow_err))
15381                                         return -rte_mtr_error_set(error,
15382                                         ENOTSUP,
15383                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15384                                         NULL, "cannot setup "
15385                                         "policy jump action");
15386                                 sub_policy->jump_tbl[i] =
15387                                 flow_dv_tbl_resource_get(dev,
15388                                         table, egress,
15389                                         transfer,
15390                                         !!dev_flow.external,
15391                                         NULL, jump_group, 0,
15392                                         0, &flow_err);
15393                                 if
15394                                 (!sub_policy->jump_tbl[i])
15395                                         return  -rte_mtr_error_set(error,
15396                                         ENOTSUP,
15397                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15398                                         NULL, "cannot create jump action.");
15399                                 tbl_data = container_of
15400                                 (sub_policy->jump_tbl[i],
15401                                 struct mlx5_flow_tbl_data_entry, tbl);
15402                                 act_cnt->dr_jump_action[domain] =
15403                                         tbl_data->jump.action;
15404                                 act_cnt->fate_action =
15405                                         MLX5_FLOW_FATE_JUMP;
15406                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
15407                                 break;
15408                         }
15409                         case RTE_FLOW_ACTION_TYPE_METER:
15410                         {
15411                                 const struct rte_flow_action_meter *mtr;
15412                                 struct mlx5_flow_meter_info *next_fm;
15413                                 struct mlx5_flow_meter_policy *next_policy;
15414                                 struct rte_flow_action tag_action;
15415                                 struct mlx5_rte_flow_action_set_tag set_tag;
15416                                 uint32_t next_mtr_idx = 0;
15417
15418                                 mtr = act->conf;
15419                                 next_fm = mlx5_flow_meter_find(priv,
15420                                                         mtr->mtr_id,
15421                                                         &next_mtr_idx);
15422                                 if (!next_fm)
15423                                         return -rte_mtr_error_set(error, EINVAL,
15424                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15425                                                 "Fail to find next meter.");
15426                                 if (next_fm->def_policy)
15427                                         return -rte_mtr_error_set(error, EINVAL,
15428                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
15429                                 "Hierarchy only supports termination meter.");
15430                                 next_policy = mlx5_flow_meter_policy_find(dev,
15431                                                 next_fm->policy_id, NULL);
15432                                 MLX5_ASSERT(next_policy);
15433                                 if (next_fm->drop_cnt) {
15434                                         set_tag.id =
15435                                                 (enum modify_reg)
15436                                                 mlx5_flow_get_reg_id(dev,
15437                                                 MLX5_MTR_ID,
15438                                                 0,
15439                                                 (struct rte_flow_error *)error);
15440                                         set_tag.offset = (priv->mtr_reg_share ?
15441                                                 MLX5_MTR_COLOR_BITS : 0);
15442                                         set_tag.length = (priv->mtr_reg_share ?
15443                                                MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
15444                                                MLX5_REG_BITS);
15445                                         set_tag.data = next_mtr_idx;
15446                                         tag_action.type =
15447                                                 (enum rte_flow_action_type)
15448                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
15449                                         tag_action.conf = &set_tag;
15450                                         if (flow_dv_convert_action_set_reg
15451                                                 (mhdr_res, &tag_action,
15452                                                 (struct rte_flow_error *)error))
15453                                                 return -rte_errno;
15454                                         action_flags |=
15455                                                 MLX5_FLOW_ACTION_SET_TAG;
15456                                 }
15457                                 act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
15458                                 act_cnt->next_mtr_id = next_fm->meter_id;
15459                                 act_cnt->next_sub_policy = NULL;
15460                                 mtr_policy->is_hierarchy = 1;
15461                                 mtr_policy->dev = next_policy->dev;
15462                                 action_flags |=
15463                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
15464                                 break;
15465                         }
15466                         default:
15467                                 return -rte_mtr_error_set(error, ENOTSUP,
15468                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15469                                           NULL, "action type not supported");
15470                         }
15471                         if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
15472                                 /* create modify action if needed. */
15473                                 dev_flow.dv.group = 1;
15474                                 if (flow_dv_modify_hdr_resource_register
15475                                         (dev, mhdr_res, &dev_flow, &flow_err))
15476                                         return -rte_mtr_error_set(error,
15477                                                 ENOTSUP,
15478                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
15479                                                 NULL, "cannot register policy "
15480                                                 "set tag action");
15481                                 act_cnt->modify_hdr =
15482                                         dev_flow.handle->dvh.modify_hdr;
15483                         }
15484                 }
15485         }
15486         return 0;
15487 }
15488
15489 /**
15490  * Create policy action per domain, lock free,
15491  * (mutex should be acquired by caller).
15492  * Dispatcher for action type specific call.
15493  *
15494  * @param[in] dev
15495  *   Pointer to the Ethernet device structure.
15496  * @param[in] mtr_policy
15497  *   Meter policy struct.
15498  * @param[in] action
15499  *   Action specification used to create meter actions.
15500  * @param[out] error
15501  *   Perform verbose error reporting if not NULL. Initialized in case of
15502  *   error only.
15503  *
15504  * @return
15505  *   0 on success, otherwise negative errno value.
15506  */
15507 static int
15508 flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
15509                       struct mlx5_flow_meter_policy *mtr_policy,
15510                       const struct rte_flow_action *actions[RTE_COLORS],
15511                       struct rte_mtr_error *error)
15512 {
15513         int ret, i;
15514         uint16_t sub_policy_num;
15515
15516         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15517                 sub_policy_num = (mtr_policy->sub_policy_num >>
15518                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
15519                         MLX5_MTR_SUB_POLICY_NUM_MASK;
15520                 if (sub_policy_num) {
15521                         ret = __flow_dv_create_domain_policy_acts(dev,
15522                                 mtr_policy, actions,
15523                                 (enum mlx5_meter_domain)i, error);
15524                         if (ret)
15525                                 return ret;
15526                 }
15527         }
15528         return 0;
15529 }
15530
15531 /**
15532  * Query a DV flow rule for its statistics via DevX.
15533  *
15534  * @param[in] dev
15535  *   Pointer to Ethernet device.
15536  * @param[in] cnt_idx
15537  *   Index to the flow counter.
15538  * @param[out] data
15539  *   Data retrieved by the query.
15540  * @param[out] error
15541  *   Perform verbose error reporting if not NULL.
15542  *
15543  * @return
15544  *   0 on success, a negative errno value otherwise and rte_errno is set.
15545  */
15546 static int
15547 flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
15548                     struct rte_flow_error *error)
15549 {
15550         struct mlx5_priv *priv = dev->data->dev_private;
15551         struct rte_flow_query_count *qc = data;
15552
15553         if (!priv->config.devx)
15554                 return rte_flow_error_set(error, ENOTSUP,
15555                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15556                                           NULL,
15557                                           "counters are not supported");
15558         if (cnt_idx) {
15559                 uint64_t pkts, bytes;
15560                 struct mlx5_flow_counter *cnt;
15561                 int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
15562
15563                 if (err)
15564                         return rte_flow_error_set(error, -err,
15565                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15566                                         NULL, "cannot read counters");
15567                 cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
15568                 qc->hits_set = 1;
15569                 qc->bytes_set = 1;
15570                 qc->hits = pkts - cnt->hits;
15571                 qc->bytes = bytes - cnt->bytes;
15572                 if (qc->reset) {
15573                         cnt->hits = pkts;
15574                         cnt->bytes = bytes;
15575                 }
15576                 return 0;
15577         }
15578         return rte_flow_error_set(error, EINVAL,
15579                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15580                                   NULL,
15581                                   "counters are not available");
15582 }
15583
15584 static int
15585 flow_dv_action_query(struct rte_eth_dev *dev,
15586                      const struct rte_flow_action_handle *handle, void *data,
15587                      struct rte_flow_error *error)
15588 {
15589         struct mlx5_age_param *age_param;
15590         struct rte_flow_query_age *resp;
15591         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
15592         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
15593         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
15594         struct mlx5_priv *priv = dev->data->dev_private;
15595         struct mlx5_aso_ct_action *ct;
15596         uint16_t owner;
15597         uint32_t dev_idx;
15598
15599         switch (type) {
15600         case MLX5_INDIRECT_ACTION_TYPE_AGE:
15601                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
15602                 resp = data;
15603                 resp->aged = __atomic_load_n(&age_param->state,
15604                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
15605                                                                           1 : 0;
15606                 resp->sec_since_last_hit_valid = !resp->aged;
15607                 if (resp->sec_since_last_hit_valid)
15608                         resp->sec_since_last_hit = __atomic_load_n
15609                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15610                 return 0;
15611         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
15612                 return flow_dv_query_count(dev, idx, data, error);
15613         case MLX5_INDIRECT_ACTION_TYPE_CT:
15614                 owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
15615                 if (owner != PORT_ID(priv))
15616                         return rte_flow_error_set(error, EACCES,
15617                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15618                                         NULL,
15619                                         "CT object owned by another port");
15620                 dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
15621                 ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
15622                 MLX5_ASSERT(ct);
15623                 if (!ct->refcnt)
15624                         return rte_flow_error_set(error, EFAULT,
15625                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15626                                         NULL,
15627                                         "CT object is inactive");
15628                 ((struct rte_flow_action_conntrack *)data)->peer_port =
15629                                                         ct->peer;
15630                 ((struct rte_flow_action_conntrack *)data)->is_original_dir =
15631                                                         ct->is_original;
15632                 if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
15633                         return rte_flow_error_set(error, EIO,
15634                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15635                                         NULL,
15636                                         "Failed to query CT context");
15637                 return 0;
15638         default:
15639                 return rte_flow_error_set(error, ENOTSUP,
15640                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
15641                                           "action type query not supported");
15642         }
15643 }
15644
15645 /**
15646  * Query a flow rule AGE action for aging information.
15647  *
15648  * @param[in] dev
15649  *   Pointer to Ethernet device.
15650  * @param[in] flow
15651  *   Pointer to the sub flow.
15652  * @param[out] data
15653  *   data retrieved by the query.
15654  * @param[out] error
15655  *   Perform verbose error reporting if not NULL.
15656  *
15657  * @return
15658  *   0 on success, a negative errno value otherwise and rte_errno is set.
15659  */
15660 static int
15661 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
15662                   void *data, struct rte_flow_error *error)
15663 {
15664         struct rte_flow_query_age *resp = data;
15665         struct mlx5_age_param *age_param;
15666
15667         if (flow->age) {
15668                 struct mlx5_aso_age_action *act =
15669                                      flow_aso_age_get_by_idx(dev, flow->age);
15670
15671                 age_param = &act->age_params;
15672         } else if (flow->counter) {
15673                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
15674
15675                 if (!age_param || !age_param->timeout)
15676                         return rte_flow_error_set
15677                                         (error, EINVAL,
15678                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15679                                          NULL, "cannot read age data");
15680         } else {
15681                 return rte_flow_error_set(error, EINVAL,
15682                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15683                                           NULL, "age data not available");
15684         }
15685         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
15686                                      AGE_TMOUT ? 1 : 0;
15687         resp->sec_since_last_hit_valid = !resp->aged;
15688         if (resp->sec_since_last_hit_valid)
15689                 resp->sec_since_last_hit = __atomic_load_n
15690                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
15691         return 0;
15692 }
15693
15694 /**
15695  * Query a flow.
15696  *
15697  * @see rte_flow_query()
15698  * @see rte_flow_ops
15699  */
15700 static int
15701 flow_dv_query(struct rte_eth_dev *dev,
15702               struct rte_flow *flow __rte_unused,
15703               const struct rte_flow_action *actions __rte_unused,
15704               void *data __rte_unused,
15705               struct rte_flow_error *error __rte_unused)
15706 {
15707         int ret = -EINVAL;
15708
15709         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
15710                 switch (actions->type) {
15711                 case RTE_FLOW_ACTION_TYPE_VOID:
15712                         break;
15713                 case RTE_FLOW_ACTION_TYPE_COUNT:
15714                         ret = flow_dv_query_count(dev, flow->counter, data,
15715                                                   error);
15716                         break;
15717                 case RTE_FLOW_ACTION_TYPE_AGE:
15718                         ret = flow_dv_query_age(dev, flow, data, error);
15719                         break;
15720                 default:
15721                         return rte_flow_error_set(error, ENOTSUP,
15722                                                   RTE_FLOW_ERROR_TYPE_ACTION,
15723                                                   actions,
15724                                                   "action not supported");
15725                 }
15726         }
15727         return ret;
15728 }
15729
15730 /**
15731  * Destroy the meter table set.
15732  * Lock free, (mutex should be acquired by caller).
15733  *
15734  * @param[in] dev
15735  *   Pointer to Ethernet device.
15736  * @param[in] fm
15737  *   Meter information table.
15738  */
15739 static void
15740 flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
15741                         struct mlx5_flow_meter_info *fm)
15742 {
15743         struct mlx5_priv *priv = dev->data->dev_private;
15744         int i;
15745
15746         if (!fm || !priv->config.dv_flow_en)
15747                 return;
15748         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15749                 if (fm->drop_rule[i]) {
15750                         claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
15751                         fm->drop_rule[i] = NULL;
15752                 }
15753         }
15754 }
15755
15756 static void
15757 flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
15758 {
15759         struct mlx5_priv *priv = dev->data->dev_private;
15760         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
15761         struct mlx5_flow_tbl_data_entry *tbl;
15762         int i, j;
15763
15764         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
15765                 if (mtrmng->def_rule[i]) {
15766                         claim_zero(mlx5_flow_os_destroy_flow
15767                                         (mtrmng->def_rule[i]));
15768                         mtrmng->def_rule[i] = NULL;
15769                 }
15770                 if (mtrmng->def_matcher[i]) {
15771                         tbl = container_of(mtrmng->def_matcher[i]->tbl,
15772                                 struct mlx5_flow_tbl_data_entry, tbl);
15773                         mlx5_list_unregister(tbl->matchers,
15774                                              &mtrmng->def_matcher[i]->entry);
15775                         mtrmng->def_matcher[i] = NULL;
15776                 }
15777                 for (j = 0; j < MLX5_REG_BITS; j++) {
15778                         if (mtrmng->drop_matcher[i][j]) {
15779                                 tbl =
15780                                 container_of(mtrmng->drop_matcher[i][j]->tbl,
15781                                              struct mlx5_flow_tbl_data_entry,
15782                                              tbl);
15783                                 mlx5_list_unregister(tbl->matchers,
15784                                             &mtrmng->drop_matcher[i][j]->entry);
15785                                 mtrmng->drop_matcher[i][j] = NULL;
15786                         }
15787                 }
15788                 if (mtrmng->drop_tbl[i]) {
15789                         flow_dv_tbl_resource_release(MLX5_SH(dev),
15790                                 mtrmng->drop_tbl[i]);
15791                         mtrmng->drop_tbl[i] = NULL;
15792                 }
15793         }
15794 }
15795
15796 /* Number of meter flow actions, count and jump or count and drop. */
15797 #define METER_ACTIONS 2
15798
15799 static void
15800 __flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
15801                               enum mlx5_meter_domain domain)
15802 {
15803         struct mlx5_priv *priv = dev->data->dev_private;
15804         struct mlx5_flow_meter_def_policy *def_policy =
15805                         priv->sh->mtrmng->def_policy[domain];
15806
15807         __flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
15808         mlx5_free(def_policy);
15809         priv->sh->mtrmng->def_policy[domain] = NULL;
15810 }
15811
15812 /**
15813  * Destroy the default policy table set.
15814  *
15815  * @param[in] dev
15816  *   Pointer to Ethernet device.
15817  */
15818 static void
15819 flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
15820 {
15821         struct mlx5_priv *priv = dev->data->dev_private;
15822         int i;
15823
15824         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
15825                 if (priv->sh->mtrmng->def_policy[i])
15826                         __flow_dv_destroy_domain_def_policy(dev,
15827                                         (enum mlx5_meter_domain)i);
15828         priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
15829 }
15830
15831 static int
15832 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
15833                         uint32_t color_reg_c_idx,
15834                         enum rte_color color, void *matcher_object,
15835                         int actions_n, void *actions,
15836                         bool match_src_port, const struct rte_flow_item *item,
15837                         void **rule, const struct rte_flow_attr *attr)
15838 {
15839         int ret;
15840         struct mlx5_flow_dv_match_params value = {
15841                 .size = sizeof(value.buf),
15842         };
15843         struct mlx5_flow_dv_match_params matcher = {
15844                 .size = sizeof(matcher.buf),
15845         };
15846         struct mlx5_priv *priv = dev->data->dev_private;
15847         uint8_t misc_mask;
15848
15849         if (match_src_port && (priv->representor || priv->master)) {
15850                 if (flow_dv_translate_item_port_id(dev, matcher.buf,
15851                                                    value.buf, item, attr)) {
15852                         DRV_LOG(ERR,
15853                         "Failed to create meter policy flow with port.");
15854                         return -1;
15855                 }
15856         }
15857         flow_dv_match_meta_reg(matcher.buf, value.buf,
15858                                 (enum modify_reg)color_reg_c_idx,
15859                                 rte_col_2_mlx5_col(color),
15860                                 UINT32_MAX);
15861         misc_mask = flow_dv_matcher_enable(value.buf);
15862         __flow_dv_adjust_buf_size(&value.size, misc_mask);
15863         ret = mlx5_flow_os_create_flow(matcher_object,
15864                         (void *)&value, actions_n, actions, rule);
15865         if (ret) {
15866                 DRV_LOG(ERR, "Failed to create meter policy flow.");
15867                 return -1;
15868         }
15869         return 0;
15870 }
15871
/**
 * Register a meter policy color matcher on the sub-policy table.
 *
 * The matcher is reference counted through the table's matcher list;
 * an existing compatible matcher is reused by mlx5_list_register().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] color_reg_c_idx
 *   Index of the REG_C register carrying the meter color.
 * @param[in] priority
 *   Matcher priority; color matching is only installed for
 *   priorities below RTE_COLOR_RED (see the check below).
 * @param[in] sub_policy
 *   Sub policy providing the flow table the matcher attaches to.
 * @param[in] attr
 *   Flow attributes used for the optional source port match.
 * @param[in] match_src_port
 *   Whether to also match the source port (representor/master only).
 * @param[in] item
 *   Port ID item for the source port match, may be NULL.
 * @param[out] policy_matcher
 *   Returned (list-registered) matcher on success.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			uint16_t priority,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			const struct rte_flow_attr *attr,
			bool match_src_port,
			const struct rte_flow_item *item,
			struct mlx5_flow_dv_matcher **policy_matcher,
			struct rte_flow_error *error)
{
	struct mlx5_list_entry *entry;
	struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
		.tbl = tbl_rsc,
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = &matcher,
	};
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Full mask over the color bits of the register. */
	uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;

	/* Source port match only makes sense on representor/master. */
	if (match_src_port && (priv->representor || priv->master)) {
		if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
						   value.buf, item, attr)) {
			DRV_LOG(ERR,
			"Failed to register meter drop matcher with port.");
			return -1;
		}
	}
	tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
	/* RED (and above) priorities skip the color register match. */
	if (priority < RTE_COLOR_RED)
		flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
			(enum modify_reg)color_reg_c_idx, 0, color_mask);
	matcher.priority = priority;
	/* CRC over the mask is the lookup key for matcher reuse. */
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
					matcher.mask.size);
	entry = mlx5_list_register(tbl_data->matchers, &ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to register meter drop matcher.");
		return -1;
	}
	*policy_matcher =
		container_of(entry, struct mlx5_flow_dv_matcher, entry);
	return 0;
}
15926
/**
 * Create the policy rules per domain.
 *
 * For each color (YELLOW is currently skipped) a matcher and a rule
 * matching that color are created on the sub-policy table and queued
 * on the sub-policy's color rule list.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] sub_policy
 *    Pointer to sub policy table..
 * @param[in] egress
 *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[in] match_src_port
 *   Whether GREEN rules also match the source port (RED never does).
 * @param[in] acts
 *   Pointer to policy action list per color.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_sub_policy *sub_policy,
		uint8_t egress, uint8_t transfer, bool match_src_port,
		struct mlx5_meter_policy_acts acts[RTE_COLORS])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_error flow_err;
	uint32_t color_reg_c_idx;
	struct rte_flow_attr attr = {
		.group = MLX5_FLOW_TABLE_LEVEL_POLICY,
		.priority = 0,
		.ingress = 0,
		.egress = !!egress,
		.transfer = !!transfer,
		.reserved = 0,
	};
	int i;
	/* Resolve which REG_C register carries the meter color. */
	int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
	struct mlx5_sub_policy_color_rule *color_rule;

	if (ret < 0)
		return -1;
	/* Create policy table with POLICY level. */
	if (!sub_policy->tbl_rsc)
		sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_POLICY,
				egress, transfer, false, NULL, 0, 0,
				sub_policy->idx, &flow_err);
	if (!sub_policy->tbl_rsc) {
		DRV_LOG(ERR,
			"Failed to create meter sub policy table.");
		return -1;
	}
	/* Prepare matchers. */
	color_reg_c_idx = ret;
	for (i = 0; i < RTE_COLORS; i++) {
		TAILQ_INIT(&sub_policy->color_rules[i]);
		/* YELLOW is not supported; skip colors with no actions. */
		if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
			continue;
		color_rule = mlx5_malloc(MLX5_MEM_ZERO,
				sizeof(struct mlx5_sub_policy_color_rule),
				0, SOCKET_ID_ANY);
		if (!color_rule) {
			DRV_LOG(ERR, "No memory to create color rule.");
			goto err_exit;
		}
		color_rule->src_port = priv->representor_id;
		/* The color index doubles as the matcher priority. */
		attr.priority = i;
		/* Create matchers for Color. */
		if (__flow_dv_create_policy_matcher(dev,
				color_reg_c_idx, i, sub_policy, &attr,
				(i != RTE_COLOR_RED ? match_src_port : false),
				NULL, &color_rule->matcher, &flow_err)) {
			DRV_LOG(ERR, "Failed to create color matcher.");
			goto err_exit;
		}
		/* Create flow, matching color. */
		if (__flow_dv_create_policy_flow(dev,
				color_reg_c_idx, (enum rte_color)i,
				color_rule->matcher->matcher_object,
				acts[i].actions_n,
				acts[i].dv_actions,
				(i != RTE_COLOR_RED ? match_src_port : false),
				NULL, &color_rule->rule,
				&attr)) {
			DRV_LOG(ERR, "Failed to create color rule.");
			goto err_exit;
		}
		/* Only fully-built rules are queued; see err_exit below. */
		TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
				  color_rule, next_port);
	}
	return 0;
err_exit:
	/*
	 * Undo only the rule currently under construction; rules already
	 * queued on color_rules are presumably released by the caller's
	 * sub-policy destroy path — TODO confirm.
	 */
	if (color_rule) {
		if (color_rule->rule)
			mlx5_flow_os_destroy_flow(color_rule->rule);
		if (color_rule->matcher) {
			struct mlx5_flow_tbl_data_entry *tbl =
				container_of(color_rule->matcher->tbl,
						typeof(*tbl), tbl);
			mlx5_list_unregister(tbl->matchers,
						&color_rule->matcher->entry);
		}
		mlx5_free(color_rule);
	}
	return -1;
}
16032
/**
 * Assemble the per-color DV action arrays of a meter policy and create
 * the policy rules on the sub-policy table of one domain.
 *
 * GREEN actions are built from the policy's act_cnt (mark, modify
 * header, and the fate action); RED always drops; YELLOW is skipped.
 * For meter hierarchies (fate MLX5_FLOW_FATE_MTR) the next meter is
 * looked up and attached, and a jump to its sub-policy table is added.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mtr_policy
 *   Meter policy providing the per-color actions.
 * @param[in] sub_policy
 *   Sub policy table the rules are created on.
 * @param[in] domain
 *   Meter domain (ingress/egress/transfer).
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	struct mlx5_flow_dv_tag_resource *tag;
	struct mlx5_flow_dv_port_id_action_resource *port_action;
	struct mlx5_hrxq *hrxq;
	struct mlx5_flow_meter_info *next_fm = NULL;
	struct mlx5_flow_meter_policy *next_policy;
	struct mlx5_flow_meter_sub_policy *next_sub_policy;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct rte_flow_error error;
	uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	/* On TX (and FDB on a representor) the meter action must go first. */
	bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX);
	bool match_src_port = false;
	int i;

	for (i = 0; i < RTE_COLORS; i++) {
		acts[i].actions_n = 0;
		/* YELLOW is not supported by this policy implementation. */
		if (i == RTE_COLOR_YELLOW)
			continue;
		if (i == RTE_COLOR_RED) {
			/* Only support drop on red. */
			acts[i].dv_actions[0] =
			mtr_policy->dr_drop_action[domain];
			acts[i].actions_n = 1;
			continue;
		}
		/* Meter hierarchy: resolve and attach the next meter. */
		if (mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
			struct rte_flow_attr attr = {
				.transfer = transfer
			};

			next_fm = mlx5_flow_meter_find(priv,
					mtr_policy->act_cnt[i].next_mtr_id,
					NULL);
			if (!next_fm) {
				DRV_LOG(ERR,
					"Failed to get next hierarchy meter.");
				goto err_exit;
			}
			if (mlx5_flow_meter_attach(priv, next_fm,
						   &attr, &error)) {
				DRV_LOG(ERR, "%s", error.message);
				next_fm = NULL;
				goto err_exit;
			}
			/* Meter action must be the first for TX. */
			if (mtr_first) {
				acts[i].dv_actions[acts[i].actions_n] =
					next_fm->meter_action;
				acts[i].actions_n++;
			}
		}
		/* Optional MARK action (stored as a tag resource). */
		if (mtr_policy->act_cnt[i].rix_mark) {
			tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
					mtr_policy->act_cnt[i].rix_mark);
			if (!tag) {
				DRV_LOG(ERR, "Failed to find "
				"mark action for policy.");
				goto err_exit;
			}
			acts[i].dv_actions[acts[i].actions_n] =
						tag->action;
			acts[i].actions_n++;
		}
		/* Optional modify-header action. */
		if (mtr_policy->act_cnt[i].modify_hdr) {
			acts[i].dv_actions[acts[i].actions_n] =
			mtr_policy->act_cnt[i].modify_hdr->action;
			acts[i].actions_n++;
		}
		/* Fate action comes last in the action array. */
		if (mtr_policy->act_cnt[i].fate_action) {
			switch (mtr_policy->act_cnt[i].fate_action) {
			case MLX5_FLOW_FATE_PORT_ID:
				port_action = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
				mtr_policy->act_cnt[i].rix_port_id_action);
				if (!port_action) {
					DRV_LOG(ERR, "Failed to find "
						"port action for policy.");
					goto err_exit;
				}
				acts[i].dv_actions[acts[i].actions_n] =
				port_action->action;
				acts[i].actions_n++;
				mtr_policy->dev = dev;
				/* PORT_ID fate requires source port match. */
				match_src_port = true;
				break;
			case MLX5_FLOW_FATE_DROP:
			case MLX5_FLOW_FATE_JUMP:
				acts[i].dv_actions[acts[i].actions_n] =
				mtr_policy->act_cnt[i].dr_jump_action[domain];
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_SHARED_RSS:
			case MLX5_FLOW_FATE_QUEUE:
				hrxq = mlx5_ipool_get
				(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				sub_policy->rix_hrxq[i]);
				if (!hrxq) {
					DRV_LOG(ERR, "Failed to find "
						"queue action for policy.");
					goto err_exit;
				}
				acts[i].dv_actions[acts[i].actions_n] =
				hrxq->action;
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_MTR:
				if (!next_fm) {
					DRV_LOG(ERR,
						"No next hierarchy meter.");
					goto err_exit;
				}
				if (!mtr_first) {
					acts[i].dv_actions[acts[i].actions_n] =
							next_fm->meter_action;
					acts[i].actions_n++;
				}
				if (mtr_policy->act_cnt[i].next_sub_policy) {
					next_sub_policy =
					mtr_policy->act_cnt[i].next_sub_policy;
				} else {
					next_policy =
						mlx5_flow_meter_policy_find(dev,
						next_fm->policy_id, NULL);
					MLX5_ASSERT(next_policy);
					next_sub_policy =
					next_policy->sub_policys[domain][0];
				}
				/* Jump to the next meter's policy table. */
				tbl_data =
					container_of(next_sub_policy->tbl_rsc,
					struct mlx5_flow_tbl_data_entry, tbl);
				acts[i].dv_actions[acts[i].actions_n++] =
							tbl_data->jump.action;
				if (mtr_policy->act_cnt[i].modify_hdr)
					match_src_port = !!transfer;
				break;
			default:
				/*Queue action do nothing*/
				break;
			}
		}
	}
	if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
				egress, transfer, match_src_port, acts)) {
		DRV_LOG(ERR,
		"Failed to create policy rules per domain.");
		goto err_exit;
	}
	return 0;
err_exit:
	/* Detach the meter attached above; other resources are pool-owned. */
	if (next_fm)
		mlx5_flow_meter_detach(priv, next_fm);
	return -1;
}
16194
16195 /**
16196  * Create the policy rules.
16197  *
16198  * @param[in] dev
16199  *   Pointer to Ethernet device.
16200  * @param[in,out] mtr_policy
16201  *   Pointer to meter policy table.
16202  *
16203  * @return
16204  *   0 on success, -1 otherwise.
16205  */
16206 static int
16207 flow_dv_create_policy_rules(struct rte_eth_dev *dev,
16208                              struct mlx5_flow_meter_policy *mtr_policy)
16209 {
16210         int i;
16211         uint16_t sub_policy_num;
16212
16213         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16214                 sub_policy_num = (mtr_policy->sub_policy_num >>
16215                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
16216                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16217                 if (!sub_policy_num)
16218                         continue;
16219                 /* Prepare actions list and create policy rules. */
16220                 if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16221                         mtr_policy->sub_policys[i][0], i)) {
16222                         DRV_LOG(ERR,
16223                         "Failed to create policy action list per domain.");
16224                         return -1;
16225                 }
16226         }
16227         return 0;
16228 }
16229
/**
 * Create the default meter policy of one domain: a GREEN rule jumping
 * to the meter suffix table and a RED rule jumping to the drop table.
 * Idempotent: returns success immediately if the domain's default
 * policy already exists.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] domain
 *   Meter domain (ingress/egress/transfer).
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
	struct mlx5_flow_meter_def_policy *def_policy;
	struct mlx5_flow_tbl_resource *jump_tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	uint8_t egress, transfer;
	struct rte_flow_error error;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	int ret;

	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	def_policy = mtrmng->def_policy[domain];
	if (!def_policy) {
		def_policy = mlx5_malloc(MLX5_MEM_ZERO,
			sizeof(struct mlx5_flow_meter_def_policy),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!def_policy) {
			DRV_LOG(ERR, "Failed to alloc "
					"default policy table.");
			goto def_policy_error;
		}
		/*
		 * Stored before full construction so the error path can
		 * release it via __flow_dv_destroy_domain_def_policy().
		 */
		mtrmng->def_policy[domain] = def_policy;
		/* Create the meter suffix table with SUFFIX level. */
		jump_tbl = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
		if (!jump_tbl) {
			DRV_LOG(ERR,
				"Failed to create meter suffix table.");
			goto def_policy_error;
		}
		def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
		tbl_data = container_of(jump_tbl,
				struct mlx5_flow_tbl_data_entry, tbl);
		/* GREEN packets jump to the suffix table. */
		def_policy->dr_jump_action[RTE_COLOR_GREEN] =
						tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].dv_actions[0] =
						tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].actions_n = 1;
		/* Create jump action to the drop table. */
		if (!mtrmng->drop_tbl[domain]) {
			mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
				(dev, MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_DROP, &error);
			if (!mtrmng->drop_tbl[domain]) {
				DRV_LOG(ERR, "Failed to create "
				"meter drop table for default policy.");
				goto def_policy_error;
			}
		}
		tbl_data = container_of(mtrmng->drop_tbl[domain],
				struct mlx5_flow_tbl_data_entry, tbl);
		/* RED packets jump to the shared drop table. */
		def_policy->dr_jump_action[RTE_COLOR_RED] =
						tbl_data->jump.action;
		acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_RED].actions_n = 1;
		/* Create default policy rules. */
		ret = __flow_dv_create_domain_policy_rules(dev,
					&def_policy->sub_policy,
					egress, transfer, false, acts);
		if (ret) {
			DRV_LOG(ERR, "Failed to create "
				"default policy rules.");
				goto def_policy_error;
		}
	}
	return 0;
def_policy_error:
	__flow_dv_destroy_domain_def_policy(dev,
			(enum mlx5_meter_domain)domain);
	return -1;
}
16308
16309 /**
16310  * Create the default policy table set.
16311  *
16312  * @param[in] dev
16313  *   Pointer to Ethernet device.
16314  * @return
16315  *   0 on success, -1 otherwise.
16316  */
16317 static int
16318 flow_dv_create_def_policy(struct rte_eth_dev *dev)
16319 {
16320         struct mlx5_priv *priv = dev->data->dev_private;
16321         int i;
16322
16323         /* Non-termination policy table. */
16324         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16325                 if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
16326                         continue;
16327                 if (__flow_dv_create_domain_def_policy(dev, i)) {
16328                         DRV_LOG(ERR,
16329                         "Failed to create default policy");
16330                         return -1;
16331                 }
16332         }
16333         return 0;
16334 }
16335
16336 /**
16337  * Create the needed meter tables.
16338  * Lock free, (mutex should be acquired by caller).
16339  *
16340  * @param[in] dev
16341  *   Pointer to Ethernet device.
16342  * @param[in] fm
16343  *   Meter information table.
16344  * @param[in] mtr_idx
16345  *   Meter index.
16346  * @param[in] domain_bitmap
16347  *   Domain bitmap.
16348  * @return
16349  *   0 on success, -1 otherwise.
16350  */
16351 static int
16352 flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
16353                         struct mlx5_flow_meter_info *fm,
16354                         uint32_t mtr_idx,
16355                         uint8_t domain_bitmap)
16356 {
16357         struct mlx5_priv *priv = dev->data->dev_private;
16358         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
16359         struct rte_flow_error error;
16360         struct mlx5_flow_tbl_data_entry *tbl_data;
16361         uint8_t egress, transfer;
16362         void *actions[METER_ACTIONS];
16363         int domain, ret, i;
16364         struct mlx5_flow_counter *cnt;
16365         struct mlx5_flow_dv_match_params value = {
16366                 .size = sizeof(value.buf),
16367         };
16368         struct mlx5_flow_dv_match_params matcher_para = {
16369                 .size = sizeof(matcher_para.buf),
16370         };
16371         int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
16372                                                      0, &error);
16373         uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
16374         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
16375         struct mlx5_list_entry *entry;
16376         struct mlx5_flow_dv_matcher matcher = {
16377                 .mask = {
16378                         .size = sizeof(matcher.mask.buf),
16379                 },
16380         };
16381         struct mlx5_flow_dv_matcher *drop_matcher;
16382         struct mlx5_flow_cb_ctx ctx = {
16383                 .error = &error,
16384                 .data = &matcher,
16385         };
16386         uint8_t misc_mask;
16387
16388         if (!priv->mtr_en || mtr_id_reg_c < 0) {
16389                 rte_errno = ENOTSUP;
16390                 return -1;
16391         }
16392         for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
16393                 if (!(domain_bitmap & (1 << domain)) ||
16394                         (mtrmng->def_rule[domain] && !fm->drop_cnt))
16395                         continue;
16396                 egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
16397                 transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
16398                 /* Create the drop table with METER DROP level. */
16399                 if (!mtrmng->drop_tbl[domain]) {
16400                         mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
16401                                         MLX5_FLOW_TABLE_LEVEL_METER,
16402                                         egress, transfer, false, NULL, 0,
16403                                         0, MLX5_MTR_TABLE_ID_DROP, &error);
16404                         if (!mtrmng->drop_tbl[domain]) {
16405                                 DRV_LOG(ERR, "Failed to create meter drop table.");
16406                                 goto policy_error;
16407                         }
16408                 }
16409                 /* Create default matcher in drop table. */
16410                 matcher.tbl = mtrmng->drop_tbl[domain],
16411                 tbl_data = container_of(mtrmng->drop_tbl[domain],
16412                                 struct mlx5_flow_tbl_data_entry, tbl);
16413                 if (!mtrmng->def_matcher[domain]) {
16414                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16415                                        (enum modify_reg)mtr_id_reg_c,
16416                                        0, 0);
16417                         matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
16418                         matcher.crc = rte_raw_cksum
16419                                         ((const void *)matcher.mask.buf,
16420                                         matcher.mask.size);
16421                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16422                         if (!entry) {
16423                                 DRV_LOG(ERR, "Failed to register meter "
16424                                 "drop default matcher.");
16425                                 goto policy_error;
16426                         }
16427                         mtrmng->def_matcher[domain] = container_of(entry,
16428                         struct mlx5_flow_dv_matcher, entry);
16429                 }
16430                 /* Create default rule in drop table. */
16431                 if (!mtrmng->def_rule[domain]) {
16432                         i = 0;
16433                         actions[i++] = priv->sh->dr_drop_action;
16434                         flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16435                                 (enum modify_reg)mtr_id_reg_c, 0, 0);
16436                         misc_mask = flow_dv_matcher_enable(value.buf);
16437                         __flow_dv_adjust_buf_size(&value.size, misc_mask);
16438                         ret = mlx5_flow_os_create_flow
16439                                 (mtrmng->def_matcher[domain]->matcher_object,
16440                                 (void *)&value, i, actions,
16441                                 &mtrmng->def_rule[domain]);
16442                         if (ret) {
16443                                 DRV_LOG(ERR, "Failed to create meter "
16444                                 "default drop rule for drop table.");
16445                                 goto policy_error;
16446                         }
16447                 }
16448                 if (!fm->drop_cnt)
16449                         continue;
16450                 MLX5_ASSERT(mtrmng->max_mtr_bits);
16451                 if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
16452                         /* Create matchers for Drop. */
16453                         flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
16454                                         (enum modify_reg)mtr_id_reg_c, 0,
16455                                         (mtr_id_mask << mtr_id_offset));
16456                         matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
16457                         matcher.crc = rte_raw_cksum
16458                                         ((const void *)matcher.mask.buf,
16459                                         matcher.mask.size);
16460                         entry = mlx5_list_register(tbl_data->matchers, &ctx);
16461                         if (!entry) {
16462                                 DRV_LOG(ERR,
16463                                 "Failed to register meter drop matcher.");
16464                                 goto policy_error;
16465                         }
16466                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
16467                                 container_of(entry, struct mlx5_flow_dv_matcher,
16468                                              entry);
16469                 }
16470                 drop_matcher =
16471                         mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
16472                 /* Create drop rule, matching meter_id only. */
16473                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16474                                 (enum modify_reg)mtr_id_reg_c,
16475                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16476                 i = 0;
16477                 cnt = flow_dv_counter_get_by_idx(dev,
16478                                         fm->drop_cnt, NULL);
16479                 actions[i++] = cnt->action;
16480                 actions[i++] = priv->sh->dr_drop_action;
16481                 misc_mask = flow_dv_matcher_enable(value.buf);
16482                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16483                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16484                                                (void *)&value, i, actions,
16485                                                &fm->drop_rule[domain]);
16486                 if (ret) {
16487                         DRV_LOG(ERR, "Failed to create meter "
16488                                 "drop rule for drop table.");
16489                                 goto policy_error;
16490                 }
16491         }
16492         return 0;
16493 policy_error:
16494         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16495                 if (fm->drop_rule[i]) {
16496                         claim_zero(mlx5_flow_os_destroy_flow
16497                                 (fm->drop_rule[i]));
16498                         fm->drop_rule[i] = NULL;
16499                 }
16500         }
16501         return -1;
16502 }
16503
/**
 * Get or create the RSS sub policy table for a meter policy.
 *
 * Looks for an existing sub policy whose per-color hrxq set matches
 * @p rss_desc; on match it is reused. Otherwise a new sub policy is
 * created (the first "dummy" sub policy is recycled when still empty)
 * and its policy rules are instantiated. Runs under mtr_policy->sl.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in,out] mtr_policy
 *   Meter policy whose sub policy table is searched/extended.
 * @param[in] rss_desc
 *   Per-color RSS descriptors; NULL entries are skipped.
 * @param[in] next_sub_policy
 *   Next-level sub policy to chain to when the policy is hierarchical.
 * @param[out] is_reuse
 *   Set to true when an existing sub policy was reused, false otherwise.
 * @return
 *   Sub policy pointer on success, NULL otherwise.
 */
static struct mlx5_flow_meter_sub_policy *
__flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_policy *mtr_policy,
		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
		struct mlx5_flow_meter_sub_policy *next_sub_policy,
		bool *is_reuse)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
	uint32_t sub_policy_idx = 0;
	uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
	uint32_t i, j;
	struct mlx5_hrxq *hrxq;
	struct mlx5_flow_handle dh;
	struct mlx5_meter_policy_action_container *act_cnt;
	/* RSS sub policies exist in the ingress domain only. */
	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
	uint16_t sub_policy_num;

	rte_spinlock_lock(&mtr_policy->sl);
	/* Take a reference on one hrxq per color with an RSS descriptor. */
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (!rss_desc[i])
			continue;
		hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
		if (!hrxq_idx[i]) {
			rte_spinlock_unlock(&mtr_policy->sl);
			return NULL;
		}
	}
	/* Current sub policy count is packed per-domain in one bit-field. */
	sub_policy_num = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
	for (i = 0; i < sub_policy_num;
		i++) {
		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
			if (rss_desc[j] &&
				hrxq_idx[j] !=
			mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
				break;
		}
		if (j >= MLX5_MTR_RTE_COLORS) {
			/*
			 * Found the sub policy table with
			 * the same queue per color
			 */
			rte_spinlock_unlock(&mtr_policy->sl);
			/* Drop the extra references taken above. */
			for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
				mlx5_hrxq_release(dev, hrxq_idx[j]);
			*is_reuse = true;
			return mtr_policy->sub_policys[domain][i];
		}
	}
	/* Create sub policy. */
	if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
		/* Reuse the first dummy sub_policy*/
		sub_policy = mtr_policy->sub_policys[domain][0];
		sub_policy_idx = sub_policy->idx;
	} else {
		sub_policy = mlx5_ipool_zmalloc
				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
				&sub_policy_idx);
		if (!sub_policy ||
			sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
			for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
				mlx5_hrxq_release(dev, hrxq_idx[i]);
			goto rss_sub_policy_error;
		}
		sub_policy->idx = sub_policy_idx;
		sub_policy->main_policy = mtr_policy;
	}
	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
		if (!rss_desc[i])
			continue;
		sub_policy->rix_hrxq[i] = hrxq_idx[i];
		if (mtr_policy->is_hierarchy) {
			/* Chain to the next level; the hrxq reference is
			 * not kept for hierarchical policies.
			 */
			act_cnt = &mtr_policy->act_cnt[i];
			act_cnt->next_sub_policy = next_sub_policy;
			mlx5_hrxq_release(dev, hrxq_idx[i]);
		} else {
			/*
			 * Overwrite the last action from
			 * RSS action to Queue action.
			 */
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
				hrxq_idx[i]);
			if (!hrxq) {
				DRV_LOG(ERR, "Failed to create policy hrxq");
				goto rss_sub_policy_error;
			}
			act_cnt = &mtr_policy->act_cnt[i];
			if (act_cnt->rix_mark || act_cnt->modify_hdr) {
				memset(&dh, 0, sizeof(struct mlx5_flow_handle));
				if (act_cnt->rix_mark)
					dh.mark = 1;
				dh.fate_action = MLX5_FLOW_FATE_QUEUE;
				dh.rix_hrxq = hrxq_idx[i];
				/* Update Rx queue flags for the fate queue. */
				flow_drv_rxq_flags_set(dev, &dh);
			}
		}
	}
	if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
		sub_policy, domain)) {
		DRV_LOG(ERR, "Failed to create policy "
			"rules per domain.");
		goto rss_sub_policy_error;
	}
	if (sub_policy != mtr_policy->sub_policys[domain][0]) {
		/* Append the new sub policy and bump the packed counter. */
		i = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
		mtr_policy->sub_policys[domain][i] = sub_policy;
		i++;
		if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
			goto rss_sub_policy_error;
		mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
		mtr_policy->sub_policy_num |=
			(i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
	}
	rte_spinlock_unlock(&mtr_policy->sl);
	*is_reuse = false;
	return sub_policy;
rss_sub_policy_error:
	if (sub_policy) {
		__flow_dv_destroy_sub_policy_rules(dev, sub_policy);
		/* Free only dynamically allocated sub policies, never the
		 * first dummy one.
		 */
		if (sub_policy != mtr_policy->sub_policys[domain][0]) {
			i = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
			mtr_policy->sub_policys[domain][i] = NULL;
			mlx5_ipool_free
			(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
					sub_policy->idx);
		}
	}
	rte_spinlock_unlock(&mtr_policy->sl);
	return NULL;
}
16642
16643 /**
16644  * Find the policy table for prefix table with RSS.
16645  *
16646  * @param[in] dev
16647  *   Pointer to Ethernet device.
16648  * @param[in] mtr_policy
16649  *   Pointer to meter policy table.
16650  * @param[in] rss_desc
16651  *   Pointer to rss_desc
16652  * @return
16653  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
16654  */
16655 static struct mlx5_flow_meter_sub_policy *
16656 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
16657                 struct mlx5_flow_meter_policy *mtr_policy,
16658                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
16659 {
16660         struct mlx5_priv *priv = dev->data->dev_private;
16661         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16662         struct mlx5_flow_meter_info *next_fm;
16663         struct mlx5_flow_meter_policy *next_policy;
16664         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
16665         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
16666         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
16667         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16668         bool reuse_sub_policy;
16669         uint32_t i = 0;
16670         uint32_t j = 0;
16671
16672         while (true) {
16673                 /* Iterate hierarchy to get all policies in this hierarchy. */
16674                 policies[i++] = mtr_policy;
16675                 if (!mtr_policy->is_hierarchy)
16676                         break;
16677                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
16678                         DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
16679                         return NULL;
16680                 }
16681                 next_fm = mlx5_flow_meter_find(priv,
16682                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16683                 if (!next_fm) {
16684                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
16685                         return NULL;
16686                 }
16687                 next_policy =
16688                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
16689                                                     NULL);
16690                 MLX5_ASSERT(next_policy);
16691                 mtr_policy = next_policy;
16692         }
16693         while (i) {
16694                 /**
16695                  * From last policy to the first one in hierarchy,
16696                  * create/get the sub policy for each of them.
16697                  */
16698                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
16699                                                         policies[--i],
16700                                                         rss_desc,
16701                                                         next_sub_policy,
16702                                                         &reuse_sub_policy);
16703                 if (!sub_policy) {
16704                         DRV_LOG(ERR, "Failed to get the sub policy.");
16705                         goto err_exit;
16706                 }
16707                 if (!reuse_sub_policy)
16708                         sub_policies[j++] = sub_policy;
16709                 next_sub_policy = sub_policy;
16710         }
16711         return sub_policy;
16712 err_exit:
16713         while (j) {
16714                 uint16_t sub_policy_num;
16715
16716                 sub_policy = sub_policies[--j];
16717                 mtr_policy = sub_policy->main_policy;
16718                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16719                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16720                         sub_policy_num = (mtr_policy->sub_policy_num >>
16721                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16722                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
16723                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
16724                                                                         NULL;
16725                         sub_policy_num--;
16726                         mtr_policy->sub_policy_num &=
16727                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16728                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i));
16729                         mtr_policy->sub_policy_num |=
16730                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16731                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i);
16732                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16733                                         sub_policy->idx);
16734                 }
16735         }
16736         return NULL;
16737 }
16738
16739 /**
16740  * Create the sub policy tag rule for all meters in hierarchy.
16741  *
16742  * @param[in] dev
16743  *   Pointer to Ethernet device.
16744  * @param[in] fm
16745  *   Meter information table.
16746  * @param[in] src_port
16747  *   The src port this extra rule should use.
16748  * @param[in] item
16749  *   The src port match item.
16750  * @param[out] error
16751  *   Perform verbose error reporting if not NULL.
16752  * @return
16753  *   0 on success, a negative errno value otherwise and rte_errno is set.
16754  */
16755 static int
16756 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
16757                                 struct mlx5_flow_meter_info *fm,
16758                                 int32_t src_port,
16759                                 const struct rte_flow_item *item,
16760                                 struct rte_flow_error *error)
16761 {
16762         struct mlx5_priv *priv = dev->data->dev_private;
16763         struct mlx5_flow_meter_policy *mtr_policy;
16764         struct mlx5_flow_meter_sub_policy *sub_policy;
16765         struct mlx5_flow_meter_info *next_fm = NULL;
16766         struct mlx5_flow_meter_policy *next_policy;
16767         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16768         struct mlx5_flow_tbl_data_entry *tbl_data;
16769         struct mlx5_sub_policy_color_rule *color_rule;
16770         struct mlx5_meter_policy_acts acts;
16771         uint32_t color_reg_c_idx;
16772         bool mtr_first = (src_port != UINT16_MAX) ? true : false;
16773         struct rte_flow_attr attr = {
16774                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16775                 .priority = 0,
16776                 .ingress = 0,
16777                 .egress = 0,
16778                 .transfer = 1,
16779                 .reserved = 0,
16780         };
16781         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
16782         int i;
16783
16784         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
16785         MLX5_ASSERT(mtr_policy);
16786         if (!mtr_policy->is_hierarchy)
16787                 return 0;
16788         next_fm = mlx5_flow_meter_find(priv,
16789                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16790         if (!next_fm) {
16791                 return rte_flow_error_set(error, EINVAL,
16792                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
16793                                 "Failed to find next meter in hierarchy.");
16794         }
16795         if (!next_fm->drop_cnt)
16796                 goto exit;
16797         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
16798         sub_policy = mtr_policy->sub_policys[domain][0];
16799         for (i = 0; i < RTE_COLORS; i++) {
16800                 bool rule_exist = false;
16801                 struct mlx5_meter_policy_action_container *act_cnt;
16802
16803                 if (i >= RTE_COLOR_YELLOW)
16804                         break;
16805                 TAILQ_FOREACH(color_rule,
16806                               &sub_policy->color_rules[i], next_port)
16807                         if (color_rule->src_port == src_port) {
16808                                 rule_exist = true;
16809                                 break;
16810                         }
16811                 if (rule_exist)
16812                         continue;
16813                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16814                                 sizeof(struct mlx5_sub_policy_color_rule),
16815                                 0, SOCKET_ID_ANY);
16816                 if (!color_rule)
16817                         return rte_flow_error_set(error, ENOMEM,
16818                                 RTE_FLOW_ERROR_TYPE_ACTION,
16819                                 NULL, "No memory to create tag color rule.");
16820                 color_rule->src_port = src_port;
16821                 attr.priority = i;
16822                 next_policy = mlx5_flow_meter_policy_find(dev,
16823                                                 next_fm->policy_id, NULL);
16824                 MLX5_ASSERT(next_policy);
16825                 next_sub_policy = next_policy->sub_policys[domain][0];
16826                 tbl_data = container_of(next_sub_policy->tbl_rsc,
16827                                         struct mlx5_flow_tbl_data_entry, tbl);
16828                 act_cnt = &mtr_policy->act_cnt[i];
16829                 if (mtr_first) {
16830                         acts.dv_actions[0] = next_fm->meter_action;
16831                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
16832                 } else {
16833                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
16834                         acts.dv_actions[1] = next_fm->meter_action;
16835                 }
16836                 acts.dv_actions[2] = tbl_data->jump.action;
16837                 acts.actions_n = 3;
16838                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
16839                         next_fm = NULL;
16840                         goto err_exit;
16841                 }
16842                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16843                                         i, sub_policy, &attr, true, item,
16844                                         &color_rule->matcher, error)) {
16845                         rte_flow_error_set(error, errno,
16846                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16847                                 "Failed to create hierarchy meter matcher.");
16848                         goto err_exit;
16849                 }
16850                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
16851                                         (enum rte_color)i,
16852                                         color_rule->matcher->matcher_object,
16853                                         acts.actions_n, acts.dv_actions,
16854                                         true, item,
16855                                         &color_rule->rule, &attr)) {
16856                         rte_flow_error_set(error, errno,
16857                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16858                                 "Failed to create hierarchy meter rule.");
16859                         goto err_exit;
16860                 }
16861                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16862                                   color_rule, next_port);
16863         }
16864 exit:
16865         /**
16866          * Recursive call to iterate all meters in hierarchy and
16867          * create needed rules.
16868          */
16869         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
16870                                                 src_port, item, error);
16871 err_exit:
16872         if (color_rule) {
16873                 if (color_rule->rule)
16874                         mlx5_flow_os_destroy_flow(color_rule->rule);
16875                 if (color_rule->matcher) {
16876                         struct mlx5_flow_tbl_data_entry *tbl =
16877                                 container_of(color_rule->matcher->tbl,
16878                                                 typeof(*tbl), tbl);
16879                         mlx5_list_unregister(tbl->matchers,
16880                                                 &color_rule->matcher->entry);
16881                 }
16882                 mlx5_free(color_rule);
16883         }
16884         if (next_fm)
16885                 mlx5_flow_meter_detach(priv, next_fm);
16886         return -rte_errno;
16887 }
16888
16889 /**
16890  * Destroy the sub policy table with RX queue.
16891  *
16892  * @param[in] dev
16893  *   Pointer to Ethernet device.
16894  * @param[in] mtr_policy
16895  *   Pointer to meter policy table.
16896  */
16897 static void
16898 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16899                 struct mlx5_flow_meter_policy *mtr_policy)
16900 {
16901         struct mlx5_priv *priv = dev->data->dev_private;
16902         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16903         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16904         uint32_t i, j;
16905         uint16_t sub_policy_num, new_policy_num;
16906
16907         rte_spinlock_lock(&mtr_policy->sl);
16908         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16909                 switch (mtr_policy->act_cnt[i].fate_action) {
16910                 case MLX5_FLOW_FATE_SHARED_RSS:
16911                         sub_policy_num = (mtr_policy->sub_policy_num >>
16912                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16913                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16914                         new_policy_num = sub_policy_num;
16915                         for (j = 0; j < sub_policy_num; j++) {
16916                                 sub_policy =
16917                                         mtr_policy->sub_policys[domain][j];
16918                                 if (sub_policy) {
16919                                         __flow_dv_destroy_sub_policy_rules(dev,
16920                                                 sub_policy);
16921                                 if (sub_policy !=
16922                                         mtr_policy->sub_policys[domain][0]) {
16923                                         mtr_policy->sub_policys[domain][j] =
16924                                                                 NULL;
16925                                         mlx5_ipool_free
16926                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16927                                                 sub_policy->idx);
16928                                                 new_policy_num--;
16929                                         }
16930                                 }
16931                         }
16932                         if (new_policy_num != sub_policy_num) {
16933                                 mtr_policy->sub_policy_num &=
16934                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16935                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16936                                 mtr_policy->sub_policy_num |=
16937                                 (new_policy_num &
16938                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16939                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16940                         }
16941                         break;
16942                 case MLX5_FLOW_FATE_QUEUE:
16943                         sub_policy = mtr_policy->sub_policys[domain][0];
16944                         __flow_dv_destroy_sub_policy_rules(dev,
16945                                                 sub_policy);
16946                         break;
16947                 default:
16948                         /*Other actions without queue and do nothing*/
16949                         break;
16950                 }
16951         }
16952         rte_spinlock_unlock(&mtr_policy->sl);
16953 }
16954
16955 /**
16956  * Validate the batch counter support in root table.
16957  *
16958  * Create a simple flow with invalid counter and drop action on root table to
16959  * validate if batch counter with offset on root table is supported or not.
16960  *
16961  * @param[in] dev
16962  *   Pointer to rte_eth_dev structure.
16963  *
16964  * @return
16965  *   0 on success, a negative errno value otherwise and rte_errno is set.
16966  */
16967 int
16968 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16969 {
16970         struct mlx5_priv *priv = dev->data->dev_private;
16971         struct mlx5_dev_ctx_shared *sh = priv->sh;
16972         struct mlx5_flow_dv_match_params mask = {
16973                 .size = sizeof(mask.buf),
16974         };
16975         struct mlx5_flow_dv_match_params value = {
16976                 .size = sizeof(value.buf),
16977         };
16978         struct mlx5dv_flow_matcher_attr dv_attr = {
16979                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16980                 .priority = 0,
16981                 .match_criteria_enable = 0,
16982                 .match_mask = (void *)&mask,
16983         };
16984         void *actions[2] = { 0 };
16985         struct mlx5_flow_tbl_resource *tbl = NULL;
16986         struct mlx5_devx_obj *dcs = NULL;
16987         void *matcher = NULL;
16988         void *flow = NULL;
16989         int ret = -1;
16990
16991         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16992                                         0, 0, 0, NULL);
16993         if (!tbl)
16994                 goto err;
16995         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16996         if (!dcs)
16997                 goto err;
16998         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16999                                                     &actions[0]);
17000         if (ret)
17001                 goto err;
17002         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
17003         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
17004         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
17005                                                &matcher);
17006         if (ret)
17007                 goto err;
17008         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
17009         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
17010                                        actions, &flow);
17011 err:
17012         /*
17013          * If batch counter with offset is not supported, the driver will not
17014          * validate the invalid offset value, flow create should success.
17015          * In this case, it means batch counter is not supported in root table.
17016          *
17017          * Otherwise, if flow create is failed, counter offset is supported.
17018          */
17019         if (flow) {
17020                 DRV_LOG(INFO, "Batch counter is not supported in root "
17021                               "table. Switch to fallback mode.");
17022                 rte_errno = ENOTSUP;
17023                 ret = -rte_errno;
17024                 claim_zero(mlx5_flow_os_destroy_flow(flow));
17025         } else {
17026                 /* Check matcher to make sure validate fail at flow create. */
17027                 if (!matcher || (matcher && errno != EINVAL))
17028                         DRV_LOG(ERR, "Unexpected error in counter offset "
17029                                      "support detection");
17030                 ret = 0;
17031         }
17032         if (actions[0])
17033                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
17034         if (matcher)
17035                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
17036         if (tbl)
17037                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
17038         if (dcs)
17039                 claim_zero(mlx5_devx_cmd_destroy(dcs));
17040         return ret;
17041 }
17042
17043 /**
17044  * Query a devx counter.
17045  *
17046  * @param[in] dev
17047  *   Pointer to the Ethernet device structure.
17048  * @param[in] cnt
17049  *   Index to the flow counter.
17050  * @param[in] clear
17051  *   Set to clear the counter statistics.
17052  * @param[out] pkts
17053  *   The statistics value of packets.
17054  * @param[out] bytes
17055  *   The statistics value of bytes.
17056  *
17057  * @return
17058  *   0 on success, otherwise return -1.
17059  */
17060 static int
17061 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
17062                       uint64_t *pkts, uint64_t *bytes)
17063 {
17064         struct mlx5_priv *priv = dev->data->dev_private;
17065         struct mlx5_flow_counter *cnt;
17066         uint64_t inn_pkts, inn_bytes;
17067         int ret;
17068
17069         if (!priv->config.devx)
17070                 return -1;
17071
17072         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
17073         if (ret)
17074                 return -1;
17075         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
17076         *pkts = inn_pkts - cnt->hits;
17077         *bytes = inn_bytes - cnt->bytes;
17078         if (clear) {
17079                 cnt->hits = inn_pkts;
17080                 cnt->bytes = inn_bytes;
17081         }
17082         return 0;
17083 }
17084
17085 /**
17086  * Get aged-out flows.
17087  *
17088  * @param[in] dev
17089  *   Pointer to the Ethernet device structure.
17090  * @param[in] context
17091  *   The address of an array of pointers to the aged-out flows contexts.
17092  * @param[in] nb_contexts
17093  *   The length of context array pointers.
17094  * @param[out] error
17095  *   Perform verbose error reporting if not NULL. Initialized in case of
17096  *   error only.
17097  *
17098  * @return
17099  *   how many contexts get in success, otherwise negative errno value.
17100  *   if nb_contexts is 0, return the amount of all aged contexts.
17101  *   if nb_contexts is not 0 , return the amount of aged flows reported
 *   in the context array.
 */
17105 static int
17106 flow_get_aged_flows(struct rte_eth_dev *dev,
17107                     void **context,
17108                     uint32_t nb_contexts,
17109                     struct rte_flow_error *error)
17110 {
17111         struct mlx5_priv *priv = dev->data->dev_private;
17112         struct mlx5_age_info *age_info;
17113         struct mlx5_age_param *age_param;
17114         struct mlx5_flow_counter *counter;
17115         struct mlx5_aso_age_action *act;
17116         int nb_flows = 0;
17117
17118         if (nb_contexts && !context)
17119                 return rte_flow_error_set(error, EINVAL,
17120                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17121                                           NULL, "empty context");
17122         age_info = GET_PORT_AGE_INFO(priv);
17123         rte_spinlock_lock(&age_info->aged_sl);
17124         LIST_FOREACH(act, &age_info->aged_aso, next) {
17125                 nb_flows++;
17126                 if (nb_contexts) {
17127                         context[nb_flows - 1] =
17128                                                 act->age_params.context;
17129                         if (!(--nb_contexts))
17130                                 break;
17131                 }
17132         }
17133         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17134                 nb_flows++;
17135                 if (nb_contexts) {
17136                         age_param = MLX5_CNT_TO_AGE(counter);
17137                         context[nb_flows - 1] = age_param->context;
17138                         if (!(--nb_contexts))
17139                                 break;
17140                 }
17141         }
17142         rte_spinlock_unlock(&age_info->aged_sl);
17143         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17144         return nb_flows;
17145 }
17146
/*
 * Thin adapter over flow_dv_counter_alloc() with its second argument
 * pinned to 0 (NOTE(review): presumably the "age" selector — confirm
 * against flow_dv_counter_alloc()).
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);

	return cnt_idx;
}
17155
17156 /**
17157  * Validate indirect action.
17158  * Dispatcher for action type specific validation.
17159  *
17160  * @param[in] dev
17161  *   Pointer to the Ethernet device structure.
17162  * @param[in] conf
17163  *   Indirect action configuration.
17164  * @param[in] action
17165  *   The indirect action object to validate.
17166  * @param[out] error
17167  *   Perform verbose error reporting if not NULL. Initialized in case of
17168  *   error only.
17169  *
17170  * @return
17171  *   0 on success, otherwise negative errno value.
17172  */
17173 static int
17174 flow_dv_action_validate(struct rte_eth_dev *dev,
17175                         const struct rte_flow_indir_action_conf *conf,
17176                         const struct rte_flow_action *action,
17177                         struct rte_flow_error *err)
17178 {
17179         struct mlx5_priv *priv = dev->data->dev_private;
17180
17181         RTE_SET_USED(conf);
17182         switch (action->type) {
17183         case RTE_FLOW_ACTION_TYPE_RSS:
17184                 /*
17185                  * priv->obj_ops is set according to driver capabilities.
17186                  * When DevX capabilities are
17187                  * sufficient, it is set to devx_obj_ops.
17188                  * Otherwise, it is set to ibv_obj_ops.
17189                  * ibv_obj_ops doesn't support ind_table_modify operation.
17190                  * In this case the indirect RSS action can't be used.
17191                  */
17192                 if (priv->obj_ops.ind_table_modify == NULL)
17193                         return rte_flow_error_set
17194                                         (err, ENOTSUP,
17195                                          RTE_FLOW_ERROR_TYPE_ACTION,
17196                                          NULL,
17197                                          "Indirect RSS action not supported");
17198                 return mlx5_validate_action_rss(dev, action, err);
17199         case RTE_FLOW_ACTION_TYPE_AGE:
17200                 if (!priv->sh->aso_age_mng)
17201                         return rte_flow_error_set(err, ENOTSUP,
17202                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17203                                                 NULL,
17204                                                 "Indirect age action not supported");
17205                 return flow_dv_validate_action_age(0, action, dev, err);
17206         case RTE_FLOW_ACTION_TYPE_COUNT:
17207                 /*
17208                  * There are two mechanisms to share the action count.
17209                  * The old mechanism uses the shared field to share, while the
17210                  * new mechanism uses the indirect action API.
17211                  * This validation comes to make sure that the two mechanisms
17212                  * are not combined.
17213                  */
17214                 if (is_shared_action_count(action))
17215                         return rte_flow_error_set(err, ENOTSUP,
17216                                                   RTE_FLOW_ERROR_TYPE_ACTION,
17217                                                   NULL,
17218                                                   "Mix shared and indirect counter is not supported");
17219                 return flow_dv_validate_action_count(dev, true, 0, err);
17220         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17221                 if (!priv->sh->ct_aso_en)
17222                         return rte_flow_error_set(err, ENOTSUP,
17223                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17224                                         "ASO CT is not supported");
17225                 return mlx5_validate_action_ct(dev, action->conf, err);
17226         default:
17227                 return rte_flow_error_set(err, ENOTSUP,
17228                                           RTE_FLOW_ERROR_TYPE_ACTION,
17229                                           NULL,
17230                                           "action type not supported");
17231         }
17232 }
17233
17234 /**
17235  * Validate the meter hierarchy chain for meter policy.
17236  *
17237  * @param[in] dev
17238  *   Pointer to the Ethernet device structure.
17239  * @param[in] meter_id
17240  *   Meter id.
17241  * @param[in] action_flags
17242  *   Holds the actions detected until now.
17243  * @param[out] is_rss
17244  *   Is RSS or not.
17245  * @param[out] hierarchy_domain
17246  *   The domain bitmap for hierarchy policy.
17247  * @param[out] error
17248  *   Perform verbose error reporting if not NULL. Initialized in case of
17249  *   error only.
17250  *
17251  * @return
17252  *   0 on success, otherwise negative errno value with error set.
17253  */
17254 static int
17255 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17256                                   uint32_t meter_id,
17257                                   uint64_t action_flags,
17258                                   bool *is_rss,
17259                                   uint8_t *hierarchy_domain,
17260                                   struct rte_mtr_error *error)
17261 {
17262         struct mlx5_priv *priv = dev->data->dev_private;
17263         struct mlx5_flow_meter_info *fm;
17264         struct mlx5_flow_meter_policy *policy;
17265         uint8_t cnt = 1;
17266
17267         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17268                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17269                 return -rte_mtr_error_set(error, EINVAL,
17270                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17271                                         NULL,
17272                                         "Multiple fate actions not supported.");
17273         while (true) {
17274                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17275                 if (!fm)
17276                         return -rte_mtr_error_set(error, EINVAL,
17277                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17278                                         "Meter not found in meter hierarchy.");
17279                 if (fm->def_policy)
17280                         return -rte_mtr_error_set(error, EINVAL,
17281                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17282                         "Non termination meter not supported in hierarchy.");
17283                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17284                 MLX5_ASSERT(policy);
17285                 if (!policy->is_hierarchy) {
17286                         if (policy->transfer)
17287                                 *hierarchy_domain |=
17288                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17289                         if (policy->ingress)
17290                                 *hierarchy_domain |=
17291                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17292                         if (policy->egress)
17293                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17294                         *is_rss = policy->is_rss;
17295                         break;
17296                 }
17297                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17298                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17299                         return -rte_mtr_error_set(error, EINVAL,
17300                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17301                                         "Exceed max hierarchy meter number.");
17302         }
17303         return 0;
17304 }
17305
17306 /**
17307  * Validate meter policy actions.
17308  * Dispatcher for action type specific validation.
17309  *
17310  * @param[in] dev
17311  *   Pointer to the Ethernet device structure.
17312  * @param[in] action
17313  *   The meter policy action object to validate.
17314  * @param[in] attr
17315  *   Attributes of flow to determine steering domain.
17316  * @param[out] error
17317  *   Perform verbose error reporting if not NULL. Initialized in case of
17318  *   error only.
17319  *
17320  * @return
17321  *   0 on success, otherwise negative errno value.
17322  */
17323 static int
17324 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17325                         const struct rte_flow_action *actions[RTE_COLORS],
17326                         struct rte_flow_attr *attr,
17327                         bool *is_rss,
17328                         uint8_t *domain_bitmap,
17329                         bool *is_def_policy,
17330                         struct rte_mtr_error *error)
17331 {
17332         struct mlx5_priv *priv = dev->data->dev_private;
17333         struct mlx5_dev_config *dev_conf = &priv->config;
17334         const struct rte_flow_action *act;
17335         uint64_t action_flags = 0;
17336         int actions_n;
17337         int i, ret;
17338         struct rte_flow_error flow_err;
17339         uint8_t domain_color[RTE_COLORS] = {0};
17340         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17341         uint8_t hierarchy_domain = 0;
17342         const struct rte_flow_action_meter *mtr;
17343
17344         if (!priv->config.dv_esw_en)
17345                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17346         *domain_bitmap = def_domain;
17347         if (actions[RTE_COLOR_YELLOW] &&
17348                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
17349                 return -rte_mtr_error_set(error, ENOTSUP,
17350                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17351                                 NULL,
17352                                 "Yellow color does not support any action.");
17353         if (actions[RTE_COLOR_YELLOW] &&
17354                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_DROP)
17355                 return -rte_mtr_error_set(error, ENOTSUP,
17356                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17357                                 NULL, "Red color only supports drop action.");
17358         /*
17359          * Check default policy actions:
17360          * Green/Yellow: no action, Red: drop action
17361          */
17362         if ((!actions[RTE_COLOR_GREEN] ||
17363                 actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END)) {
17364                 *is_def_policy = true;
17365                 return 0;
17366         }
17367         flow_err.message = NULL;
17368         for (i = 0; i < RTE_COLORS; i++) {
17369                 act = actions[i];
17370                 for (action_flags = 0, actions_n = 0;
17371                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
17372                         act++) {
17373                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17374                                 return -rte_mtr_error_set(error, ENOTSUP,
17375                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17376                                           NULL, "too many actions");
17377                         switch (act->type) {
17378                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17379                                 if (!priv->config.dv_esw_en)
17380                                         return -rte_mtr_error_set(error,
17381                                         ENOTSUP,
17382                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17383                                         NULL, "PORT action validate check"
17384                                         " fail for ESW disable");
17385                                 ret = flow_dv_validate_action_port_id(dev,
17386                                                 action_flags,
17387                                                 act, attr, &flow_err);
17388                                 if (ret)
17389                                         return -rte_mtr_error_set(error,
17390                                         ENOTSUP,
17391                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17392                                         NULL, flow_err.message ?
17393                                         flow_err.message :
17394                                         "PORT action validate check fail");
17395                                 ++actions_n;
17396                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
17397                                 break;
17398                         case RTE_FLOW_ACTION_TYPE_MARK:
17399                                 ret = flow_dv_validate_action_mark(dev, act,
17400                                                            action_flags,
17401                                                            attr, &flow_err);
17402                                 if (ret < 0)
17403                                         return -rte_mtr_error_set(error,
17404                                         ENOTSUP,
17405                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17406                                         NULL, flow_err.message ?
17407                                         flow_err.message :
17408                                         "Mark action validate check fail");
17409                                 if (dev_conf->dv_xmeta_en !=
17410                                         MLX5_XMETA_MODE_LEGACY)
17411                                         return -rte_mtr_error_set(error,
17412                                         ENOTSUP,
17413                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17414                                         NULL, "Extend MARK action is "
17415                                         "not supported. Please try use "
17416                                         "default policy for meter.");
17417                                 action_flags |= MLX5_FLOW_ACTION_MARK;
17418                                 ++actions_n;
17419                                 break;
17420                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17421                                 ret = flow_dv_validate_action_set_tag(dev,
17422                                                         act, action_flags,
17423                                                         attr, &flow_err);
17424                                 if (ret)
17425                                         return -rte_mtr_error_set(error,
17426                                         ENOTSUP,
17427                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17428                                         NULL, flow_err.message ?
17429                                         flow_err.message :
17430                                         "Set tag action validate check fail");
17431                                 /*
17432                                  * Count all modify-header actions
17433                                  * as one action.
17434                                  */
17435                                 if (!(action_flags &
17436                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
17437                                         ++actions_n;
17438                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
17439                                 break;
17440                         case RTE_FLOW_ACTION_TYPE_DROP:
17441                                 ret = mlx5_flow_validate_action_drop
17442                                         (action_flags,
17443                                         attr, &flow_err);
17444                                 if (ret < 0)
17445                                         return -rte_mtr_error_set(error,
17446                                         ENOTSUP,
17447                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17448                                         NULL, flow_err.message ?
17449                                         flow_err.message :
17450                                         "Drop action validate check fail");
17451                                 action_flags |= MLX5_FLOW_ACTION_DROP;
17452                                 ++actions_n;
17453                                 break;
17454                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17455                                 /*
17456                                  * Check whether extensive
17457                                  * metadata feature is engaged.
17458                                  */
17459                                 if (dev_conf->dv_flow_en &&
17460                                         (dev_conf->dv_xmeta_en !=
17461                                         MLX5_XMETA_MODE_LEGACY) &&
17462                                         mlx5_flow_ext_mreg_supported(dev))
17463                                         return -rte_mtr_error_set(error,
17464                                           ENOTSUP,
17465                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17466                                           NULL, "Queue action with meta "
17467                                           "is not supported. Please try use "
17468                                           "default policy for meter.");
17469                                 ret = mlx5_flow_validate_action_queue(act,
17470                                                         action_flags, dev,
17471                                                         attr, &flow_err);
17472                                 if (ret < 0)
17473                                         return -rte_mtr_error_set(error,
17474                                           ENOTSUP,
17475                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17476                                           NULL, flow_err.message ?
17477                                           flow_err.message :
17478                                           "Queue action validate check fail");
17479                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
17480                                 ++actions_n;
17481                                 break;
17482                         case RTE_FLOW_ACTION_TYPE_RSS:
17483                                 if (dev_conf->dv_flow_en &&
17484                                         (dev_conf->dv_xmeta_en !=
17485                                         MLX5_XMETA_MODE_LEGACY) &&
17486                                         mlx5_flow_ext_mreg_supported(dev))
17487                                         return -rte_mtr_error_set(error,
17488                                           ENOTSUP,
17489                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17490                                           NULL, "RSS action with meta "
17491                                           "is not supported. Please try use "
17492                                           "default policy for meter.");
17493                                 ret = mlx5_validate_action_rss(dev, act,
17494                                                 &flow_err);
17495                                 if (ret < 0)
17496                                         return -rte_mtr_error_set(error,
17497                                           ENOTSUP,
17498                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17499                                           NULL, flow_err.message ?
17500                                           flow_err.message :
17501                                           "RSS action validate check fail");
17502                                 action_flags |= MLX5_FLOW_ACTION_RSS;
17503                                 ++actions_n;
17504                                 *is_rss = true;
17505                                 break;
17506                         case RTE_FLOW_ACTION_TYPE_JUMP:
17507                                 ret = flow_dv_validate_action_jump(dev,
17508                                         NULL, act, action_flags,
17509                                         attr, true, &flow_err);
17510                                 if (ret)
17511                                         return -rte_mtr_error_set(error,
17512                                           ENOTSUP,
17513                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17514                                           NULL, flow_err.message ?
17515                                           flow_err.message :
17516                                           "Jump action validate check fail");
17517                                 ++actions_n;
17518                                 action_flags |= MLX5_FLOW_ACTION_JUMP;
17519                                 break;
17520                         case RTE_FLOW_ACTION_TYPE_METER:
17521                                 if (i != RTE_COLOR_GREEN)
17522                                         return -rte_mtr_error_set(error,
17523                                                 ENOTSUP,
17524                                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17525                                                 NULL, flow_err.message ?
17526                                                 flow_err.message :
17527                                   "Meter hierarchy only supports GREEN color.");
17528                                 mtr = act->conf;
17529                                 ret = flow_dv_validate_policy_mtr_hierarchy(dev,
17530                                                         mtr->mtr_id,
17531                                                         action_flags,
17532                                                         is_rss,
17533                                                         &hierarchy_domain,
17534                                                         error);
17535                                 if (ret)
17536                                         return ret;
17537                                 ++actions_n;
17538                                 action_flags |=
17539                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
17540                                 break;
17541                         default:
17542                                 return -rte_mtr_error_set(error, ENOTSUP,
17543                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17544                                         NULL,
17545                                         "Doesn't support optional action");
17546                         }
17547                 }
17548                 /* Yellow is not supported, just skip. */
17549                 if (i == RTE_COLOR_YELLOW)
17550                         continue;
17551                 if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
17552                         domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
17553                 else if ((action_flags &
17554                         (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
17555                         (action_flags & MLX5_FLOW_ACTION_MARK))
17556                         /*
17557                          * Only support MLX5_XMETA_MODE_LEGACY
17558                          * so MARK action only in ingress domain.
17559                          */
17560                         domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
17561                 else if (action_flags &
17562                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
17563                         domain_color[i] = hierarchy_domain;
17564                 else
17565                         domain_color[i] = def_domain;
17566                 /*
17567                  * Validate the drop action mutual exclusion
17568                  * with other actions. Drop action is mutually-exclusive
17569                  * with any other action, except for Count action.
17570                  */
17571                 if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
17572                         (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
17573                         return -rte_mtr_error_set(error, ENOTSUP,
17574                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17575                                 NULL, "Drop action is mutually-exclusive "
17576                                 "with any other action");
17577                 }
17578                 /* Eswitch has few restrictions on using items and actions */
17579                 if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
17580                         if (!mlx5_flow_ext_mreg_supported(dev) &&
17581                                 action_flags & MLX5_FLOW_ACTION_MARK)
17582                                 return -rte_mtr_error_set(error, ENOTSUP,
17583                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17584                                         NULL, "unsupported action MARK");
17585                         if (action_flags & MLX5_FLOW_ACTION_QUEUE)
17586                                 return -rte_mtr_error_set(error, ENOTSUP,
17587                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17588                                         NULL, "unsupported action QUEUE");
17589                         if (action_flags & MLX5_FLOW_ACTION_RSS)
17590                                 return -rte_mtr_error_set(error, ENOTSUP,
17591                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17592                                         NULL, "unsupported action RSS");
17593                         if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17594                                 return -rte_mtr_error_set(error, ENOTSUP,
17595                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17596                                         NULL, "no fate action is found");
17597                 } else {
17598                         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
17599                                 (domain_color[i] &
17600                                 MLX5_MTR_DOMAIN_INGRESS_BIT)) {
17601                                 if ((domain_color[i] &
17602                                         MLX5_MTR_DOMAIN_EGRESS_BIT))
17603                                         domain_color[i] =
17604                                         MLX5_MTR_DOMAIN_EGRESS_BIT;
17605                                 else
17606                                         return -rte_mtr_error_set(error,
17607                                         ENOTSUP,
17608                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17609                                         NULL, "no fate action is found");
17610                         }
17611                 }
17612                 if (domain_color[i] != def_domain)
17613                         *domain_bitmap = domain_color[i];
17614         }
17615         return 0;
17616 }
17617
17618 static int
17619 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
17620 {
17621         struct mlx5_priv *priv = dev->data->dev_private;
17622         int ret = 0;
17623
17624         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
17625                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
17626                                                 flags);
17627                 if (ret != 0)
17628                         return ret;
17629         }
17630         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
17631                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
17632                 if (ret != 0)
17633                         return ret;
17634         }
17635         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
17636                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
17637                 if (ret != 0)
17638                         return ret;
17639         }
17640         return 0;
17641 }
17642
/*
 * DV (Direct Verbs / Direct Rules) flow engine callback table registered
 * with the generic mlx5 flow layer. Every entry maps a driver-ops slot to
 * its DV-specific implementation defined earlier in this file.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	/* Basic flow rule lifecycle. */
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	/* Meter (rte_mtr) tables, policies and hierarchy support. */
	.create_mtr_tbls = flow_dv_create_mtr_tbls,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
	.destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
	.create_meter = flow_dv_mtr_alloc,
	.free_meter = flow_dv_aso_mtr_release_to_pool,
	.validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
	.create_mtr_acts = flow_dv_create_mtr_policy_acts,
	.destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
	.create_policy_rules = flow_dv_create_policy_rules,
	.destroy_policy_rules = flow_dv_destroy_policy_rules,
	.create_def_policy = flow_dv_create_def_policy,
	.destroy_def_policy = flow_dv_destroy_def_policy,
	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
	.meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
	.destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
	/* Flow counters and aging. */
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
	/* Indirect (shared) actions. */
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	/* Steering-domain cache synchronization. */
	.sync_domain = flow_dv_sync_domain,
};
17677
17678 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
17679