net/mlx5: add per-lcore cache to the list utility
[dpdk.git] drivers/net/mlx5/mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24 #include <rte_mtr.h>
25 #include <rte_mtr_driver.h>
26 #include <rte_tailq.h>
27
28 #include <mlx5_glue.h>
29 #include <mlx5_devx_cmds.h>
30 #include <mlx5_prm.h>
31 #include <mlx5_malloc.h>
32
33 #include "mlx5_defs.h"
34 #include "mlx5.h"
35 #include "mlx5_common_os.h"
36 #include "mlx5_flow.h"
37 #include "mlx5_flow_os.h"
38 #include "mlx5_rx.h"
39 #include "mlx5_tx.h"
40 #include "rte_pmd_mlx5.h"
41
42 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
43
44 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
45 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
46 #endif
47
48 #ifndef HAVE_MLX5DV_DR_ESWITCH
49 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
50 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
51 #endif
52 #endif
53
54 #ifndef HAVE_MLX5DV_DR
55 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
56 #endif
57
58 /* VLAN header definitions */
59 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
60 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
61 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
62 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
63 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
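/*
 * For reference: the 802.1Q TCI word is laid out as PCP (bits 15:13),
 * DEI (bit 12) and VID (bits 11:0), hence the PCP shift of 13 above.
 * For example, PCP 5 with VID 100 gives a TCI of (5 << 13) | 100 = 0xa064.
 */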
64
65 union flow_dv_attr {
66         struct {
67                 uint32_t valid:1;
68                 uint32_t ipv4:1;
69                 uint32_t ipv6:1;
70                 uint32_t tcp:1;
71                 uint32_t udp:1;
72                 uint32_t reserved:27;
73         };
74         uint32_t attr;
75 };
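/*
 * The 32-bit "attr" word overlays the bit-fields above, so a single
 * "attr->attr = 0" assignment clears all detected-layer flags at once
 * (used by flow_dv_attr_init() when a tunnel item resets the detection).
 */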
76
77 static int
78 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
79                              struct mlx5_flow_tbl_resource *tbl);
80
81 static int
82 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
83                                      uint32_t encap_decap_idx);
84
85 static int
86 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
87                                         uint32_t port_id);
88 static void
89 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
90
91 static int
92 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
93                                   uint32_t rix_jump);
94
95 /**
96  * Initialize flow attributes structure according to flow items' types.
97  *
98  * flow_dv_validate() rejects patterns with multiple L3/L4 layers except in
99  * tunnel mode. In tunnel mode, the items to be modified are the outermost ones.
100  *
101  * @param[in] item
102  *   Pointer to item specification.
103  * @param[out] attr
104  *   Pointer to flow attributes structure.
105  * @param[in] dev_flow
106  *   Pointer to the sub flow.
107  * @param[in] tunnel_decap
108  *   Whether action is after tunnel decapsulation.
109  */
110 static void
111 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
112                   struct mlx5_flow *dev_flow, bool tunnel_decap)
113 {
114         uint64_t layers = dev_flow->handle->layers;
115
116         /*
117          * If layers is already initialized, this dev_flow is a suffix
118          * flow whose layer flags were set by the prefix flow. Use the
119          * prefix flow's layer flags, since the suffix flow may not carry
120          * the user-defined items after the flow is split.
121          */
122         if (layers) {
123                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
124                         attr->ipv4 = 1;
125                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
126                         attr->ipv6 = 1;
127                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
128                         attr->tcp = 1;
129                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
130                         attr->udp = 1;
131                 attr->valid = 1;
132                 return;
133         }
134         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
135                 uint8_t next_protocol = 0xff;
136                 switch (item->type) {
137                 case RTE_FLOW_ITEM_TYPE_GRE:
138                 case RTE_FLOW_ITEM_TYPE_NVGRE:
139                 case RTE_FLOW_ITEM_TYPE_VXLAN:
140                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
141                 case RTE_FLOW_ITEM_TYPE_GENEVE:
142                 case RTE_FLOW_ITEM_TYPE_MPLS:
143                         if (tunnel_decap)
144                                 attr->attr = 0;
145                         break;
146                 case RTE_FLOW_ITEM_TYPE_IPV4:
147                         if (!attr->ipv6)
148                                 attr->ipv4 = 1;
149                         if (item->mask != NULL &&
150                             ((const struct rte_flow_item_ipv4 *)
151                             item->mask)->hdr.next_proto_id)
152                                 next_protocol =
153                                     ((const struct rte_flow_item_ipv4 *)
154                                       (item->spec))->hdr.next_proto_id &
155                                     ((const struct rte_flow_item_ipv4 *)
156                                       (item->mask))->hdr.next_proto_id;
157                         if ((next_protocol == IPPROTO_IPIP ||
158                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
159                                 attr->attr = 0;
160                         break;
161                 case RTE_FLOW_ITEM_TYPE_IPV6:
162                         if (!attr->ipv4)
163                                 attr->ipv6 = 1;
164                         if (item->mask != NULL &&
165                             ((const struct rte_flow_item_ipv6 *)
166                             item->mask)->hdr.proto)
167                                 next_protocol =
168                                     ((const struct rte_flow_item_ipv6 *)
169                                       (item->spec))->hdr.proto &
170                                     ((const struct rte_flow_item_ipv6 *)
171                                       (item->mask))->hdr.proto;
172                         if ((next_protocol == IPPROTO_IPIP ||
173                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
174                                 attr->attr = 0;
175                         break;
176                 case RTE_FLOW_ITEM_TYPE_UDP:
177                         if (!attr->tcp)
178                                 attr->udp = 1;
179                         break;
180                 case RTE_FLOW_ITEM_TYPE_TCP:
181                         if (!attr->udp)
182                                 attr->tcp = 1;
183                         break;
184                 default:
185                         break;
186                 }
187         }
188         attr->valid = 1;
189 }
190
191 /**
192  * Convert rte_mtr_color to mlx5 color.
193  *
194  * @param[in] rcol
195  *   rte_mtr_color.
196  *
197  * @return
198  *   mlx5 color.
199  */
200 static int
201 rte_col_2_mlx5_col(enum rte_color rcol)
202 {
203         switch (rcol) {
204         case RTE_COLOR_GREEN:
205                 return MLX5_FLOW_COLOR_GREEN;
206         case RTE_COLOR_YELLOW:
207                 return MLX5_FLOW_COLOR_YELLOW;
208         case RTE_COLOR_RED:
209                 return MLX5_FLOW_COLOR_RED;
210         default:
211                 break;
212         }
213         return MLX5_FLOW_COLOR_UNDEFINED;
214 }
215
216 struct field_modify_info {
217         uint32_t size; /* Size of field in protocol header, in bytes. */
218         uint32_t offset; /* Offset of field in protocol header, in bytes. */
219         enum mlx5_modification_field id;
220 };
221
222 struct field_modify_info modify_eth[] = {
223         {4,  0, MLX5_MODI_OUT_DMAC_47_16},
224         {2,  4, MLX5_MODI_OUT_DMAC_15_0},
225         {4,  6, MLX5_MODI_OUT_SMAC_47_16},
226         {2, 10, MLX5_MODI_OUT_SMAC_15_0},
227         {0, 0, 0},
228 };
229
230 struct field_modify_info modify_vlan_out_first_vid[] = {
231         /* Size in bits !!! */
232         {12, 0, MLX5_MODI_OUT_FIRST_VID},
233         {0, 0, 0},
234 };
235
236 struct field_modify_info modify_ipv4[] = {
237         {1,  1, MLX5_MODI_OUT_IP_DSCP},
238         {1,  8, MLX5_MODI_OUT_IPV4_TTL},
239         {4, 12, MLX5_MODI_OUT_SIPV4},
240         {4, 16, MLX5_MODI_OUT_DIPV4},
241         {0, 0, 0},
242 };
243
244 struct field_modify_info modify_ipv6[] = {
245         {1,  0, MLX5_MODI_OUT_IP_DSCP},
246         {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
247         {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
248         {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
249         {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
250         {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
251         {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
252         {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
253         {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
254         {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
255         {0, 0, 0},
256 };
257
258 struct field_modify_info modify_udp[] = {
259         {2, 0, MLX5_MODI_OUT_UDP_SPORT},
260         {2, 2, MLX5_MODI_OUT_UDP_DPORT},
261         {0, 0, 0},
262 };
263
264 struct field_modify_info modify_tcp[] = {
265         {2, 0, MLX5_MODI_OUT_TCP_SPORT},
266         {2, 2, MLX5_MODI_OUT_TCP_DPORT},
267         {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
268         {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
269         {0, 0, 0},
270 };
271
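/**
 * Find the first tunnel item in the pattern.
 *
 * A pair of back-to-back IPv4/IPv6 items is treated as an IP-in-IP tunnel.
 *
 * @param[in] item
 *   Pointer to the start of the item list.
 *
 * @return
 *   Pointer to the first tunnel item, NULL if no tunnel item is found.
 */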
272 static const struct rte_flow_item *
273 mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
274 {
275         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
276                 switch (item->type) {
277                 default:
278                         break;
279                 case RTE_FLOW_ITEM_TYPE_VXLAN:
280                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
281                 case RTE_FLOW_ITEM_TYPE_GRE:
282                 case RTE_FLOW_ITEM_TYPE_MPLS:
283                 case RTE_FLOW_ITEM_TYPE_NVGRE:
284                 case RTE_FLOW_ITEM_TYPE_GENEVE:
285                         return item;
286                 case RTE_FLOW_ITEM_TYPE_IPV4:
287                 case RTE_FLOW_ITEM_TYPE_IPV6:
288                         if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
289                             item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
290                                 return item;
291                         break;
292                 }
293         }
294         return NULL;
295 }
296
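/**
 * Flag IP-in-IP tunneling according to the inner IP protocol.
 *
 * @param[in] item
 *   Pointer to the IPv4/IPv6 item (used by the assertion only).
 * @param[in] next_protocol
 *   Next protocol value deduced from the item.
 * @param[in,out] item_flags
 *   Item flags to update with the detected tunnel layer.
 * @param[out] tunnel
 *   Set to 1 when a tunneling protocol is detected.
 */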
297 static void
298 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
299                           uint8_t next_protocol, uint64_t *item_flags,
300                           int *tunnel)
301 {
302         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
303                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
304         if (next_protocol == IPPROTO_IPIP) {
305                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
306                 *tunnel = 1;
307         }
308         if (next_protocol == IPPROTO_IPV6) {
309                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
310                 *tunnel = 1;
311         }
312 }
313
314 /** Update VLAN's VID/PCP based on the input rte_flow_action.
315  *
316  * @param[in] action
317  *   Pointer to struct rte_flow_action.
318  * @param[out] vlan
319  *   Pointer to struct rte_vlan_hdr.
320  */
321 static void
322 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
323                          struct rte_vlan_hdr *vlan)
324 {
325         uint16_t vlan_tci;
326         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
327                 vlan_tci =
328                     ((const struct rte_flow_action_of_set_vlan_pcp *)
329                                                action->conf)->vlan_pcp;
330                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
331                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
332                 vlan->vlan_tci |= vlan_tci;
333         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
334                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
335                 vlan->vlan_tci |= rte_be_to_cpu_16
336                     (((const struct rte_flow_action_of_set_vlan_vid *)
337                                              action->conf)->vlan_vid);
338         }
339 }
340
341 /**
342  * Fetch a 1, 2, 3 or 4 byte field from the byte array
343  * and return it as an unsigned integer in host-endian format.
344  *
345  * @param[in] data
346  *   Pointer to data array.
347  * @param[in] size
348  *   Size of field to extract.
349  *
350  * @return
351  *   Converted field in host-endian format.
352  */
353 static inline uint32_t
354 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
355 {
356         uint32_t ret;
357
358         switch (size) {
359         case 1:
360                 ret = *data;
361                 break;
362         case 2:
363                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
364                 break;
365         case 3:
366                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
367                 ret = (ret << 8) | *(data + sizeof(uint16_t));
368                 break;
369         case 4:
370                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
371                 break;
372         default:
373                 MLX5_ASSERT(false);
374                 ret = 0;
375                 break;
376         }
377         return ret;
378 }
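/*
 * Worked example for the 3-byte case above: given data = {0x12, 0x34, 0x56},
 * the first two bytes are read as big-endian 0x1234, shifted left by 8 and
 * OR-ed with the third byte, yielding 0x123456 in host order.
 */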
379
380 /**
381  * Convert modify-header action to DV specification.
382  *
383  * The data length of each action is determined by the provided field
384  * description and the item mask. The data bit offset and width of each
385  * action are deduced from the item mask.
386  *
387  * @param[in] item
388  *   Pointer to item specification.
389  * @param[in] field
390  *   Pointer to field modification information.
391  *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
392  *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
393  *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
394  * @param[in] dcopy
395  *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
396  *   A negative offset value sets the same offset as the source offset.
397  *   The size field is ignored; the value is taken from the source field.
398  * @param[in,out] resource
399  *   Pointer to the modify-header resource.
400  * @param[in] type
401  *   Type of modification.
402  * @param[out] error
403  *   Pointer to the error structure.
404  *
405  * @return
406  *   0 on success, a negative errno value otherwise and rte_errno is set.
407  */
408 static int
409 flow_dv_convert_modify_action(struct rte_flow_item *item,
410                               struct field_modify_info *field,
411                               struct field_modify_info *dcopy,
412                               struct mlx5_flow_dv_modify_hdr_resource *resource,
413                               uint32_t type, struct rte_flow_error *error)
414 {
415         uint32_t i = resource->actions_num;
416         struct mlx5_modification_cmd *actions = resource->actions;
417         uint32_t carry_b = 0;
418
419         /*
420          * The item and mask are provided in big-endian format.
421          * The fields should be presented in big-endian format as well.
422          * A mask must always be present; it defines the actual field width.
423          */
424         MLX5_ASSERT(item->mask);
425         MLX5_ASSERT(field->size);
426         do {
427                 uint32_t size_b;
428                 uint32_t off_b;
429                 uint32_t mask;
430                 uint32_t data;
431                 bool next_field = true;
432                 bool next_dcopy = true;
433
434                 if (i >= MLX5_MAX_MODIFY_NUM)
435                         return rte_flow_error_set(error, EINVAL,
436                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
437                                  "too many items to modify");
438                 /* Fetch variable byte size mask from the array. */
439                 mask = flow_dv_fetch_field((const uint8_t *)item->mask +
440                                            field->offset, field->size);
441                 if (!mask) {
442                         ++field;
443                         continue;
444                 }
445                 /* Deduce actual data width in bits from mask value. */
446                 off_b = rte_bsf32(mask) + carry_b;
447                 size_b = sizeof(uint32_t) * CHAR_BIT -
448                          off_b - __builtin_clz(mask);
449                 MLX5_ASSERT(size_b);
450                 actions[i] = (struct mlx5_modification_cmd) {
451                         .action_type = type,
452                         .field = field->id,
453                         .offset = off_b,
454                         .length = (size_b == sizeof(uint32_t) * CHAR_BIT) ?
455                                 0 : size_b,
456                 };
457                 if (type == MLX5_MODIFICATION_TYPE_COPY) {
458                         MLX5_ASSERT(dcopy);
459                         actions[i].dst_field = dcopy->id;
460                         actions[i].dst_offset =
461                                 (int)dcopy->offset < 0 ? off_b : dcopy->offset;
462                         /* Convert entire record to big-endian format. */
463                         actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
464                         /*
465                          * Destination field overflow. Copy leftovers of
466                          * a source field to the next destination field.
467                          */
468                         carry_b = 0;
469                         if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
470                             dcopy->size != 0) {
471                                 actions[i].length =
472                                         dcopy->size * CHAR_BIT - dcopy->offset;
473                                 carry_b = actions[i].length;
474                                 next_field = false;
475                         }
476                         /*
477                          * Not enough bits in a source field to fill a
478                          * destination field. Switch to the next source.
479                          */
480                         if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
481                             (size_b == field->size * CHAR_BIT - off_b)) {
482                                 actions[i].length =
483                                         field->size * CHAR_BIT - off_b;
484                                 dcopy->offset += actions[i].length;
485                                 next_dcopy = false;
486                         }
487                         if (next_dcopy)
488                                 ++dcopy;
489                 } else {
490                         MLX5_ASSERT(item->spec);
491                         data = flow_dv_fetch_field((const uint8_t *)item->spec +
492                                                    field->offset, field->size);
493                         /* Shift out the trailing masked bits from data. */
494                         data = (data & mask) >> off_b;
495                         actions[i].data1 = rte_cpu_to_be_32(data);
496                 }
497                 /* Convert entire record to expected big-endian format. */
498                 actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
499                 if (next_field)
500                         ++field;
501                 ++i;
502         } while (field->size);
503         if (resource->actions_num == i)
504                 return rte_flow_error_set(error, EINVAL,
505                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
506                                           "invalid modification flow item");
507         resource->actions_num = i;
508         return 0;
509 }
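/*
 * Sketch of the width/offset deduction above, assuming a hypothetical mask
 * of 0x00ffff00 and no carry: rte_bsf32() yields off_b = 8, __builtin_clz()
 * yields 8 leading zero bits, so size_b = 32 - 8 - 8 = 16, i.e. a 16-bit
 * field starting at bit offset 8.
 */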
510
511 /**
512  * Convert modify-header set IPv4 address action to DV specification.
513  *
514  * @param[in,out] resource
515  *   Pointer to the modify-header resource.
516  * @param[in] action
517  *   Pointer to action specification.
518  * @param[out] error
519  *   Pointer to the error structure.
520  *
521  * @return
522  *   0 on success, a negative errno value otherwise and rte_errno is set.
523  */
524 static int
525 flow_dv_convert_action_modify_ipv4
526                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
527                          const struct rte_flow_action *action,
528                          struct rte_flow_error *error)
529 {
530         const struct rte_flow_action_set_ipv4 *conf =
531                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
532         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
533         struct rte_flow_item_ipv4 ipv4;
534         struct rte_flow_item_ipv4 ipv4_mask;
535
536         memset(&ipv4, 0, sizeof(ipv4));
537         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
538         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
539                 ipv4.hdr.src_addr = conf->ipv4_addr;
540                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
541         } else {
542                 ipv4.hdr.dst_addr = conf->ipv4_addr;
543                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
544         }
545         item.spec = &ipv4;
546         item.mask = &ipv4_mask;
547         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
548                                              MLX5_MODIFICATION_TYPE_SET, error);
549 }
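/*
 * Usage sketch with hypothetical values: a SET_IPV4_SRC action carries the
 * new source address in network byte order, e.g.:
 *
 *   struct rte_flow_action_set_ipv4 conf = {
 *       .ipv4_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *   };
 *   struct rte_flow_action action = {
 *       .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *       .conf = &conf,
 *   };
 */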
550
551 /**
552  * Convert modify-header set IPv6 address action to DV specification.
553  *
554  * @param[in,out] resource
555  *   Pointer to the modify-header resource.
556  * @param[in] action
557  *   Pointer to action specification.
558  * @param[out] error
559  *   Pointer to the error structure.
560  *
561  * @return
562  *   0 on success, a negative errno value otherwise and rte_errno is set.
563  */
564 static int
565 flow_dv_convert_action_modify_ipv6
566                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
567                          const struct rte_flow_action *action,
568                          struct rte_flow_error *error)
569 {
570         const struct rte_flow_action_set_ipv6 *conf =
571                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
572         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
573         struct rte_flow_item_ipv6 ipv6;
574         struct rte_flow_item_ipv6 ipv6_mask;
575
576         memset(&ipv6, 0, sizeof(ipv6));
577         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
578         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
579                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
580                        sizeof(ipv6.hdr.src_addr));
581                 memcpy(&ipv6_mask.hdr.src_addr,
582                        &rte_flow_item_ipv6_mask.hdr.src_addr,
583                        sizeof(ipv6.hdr.src_addr));
584         } else {
585                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
586                        sizeof(ipv6.hdr.dst_addr));
587                 memcpy(&ipv6_mask.hdr.dst_addr,
588                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
589                        sizeof(ipv6.hdr.dst_addr));
590         }
591         item.spec = &ipv6;
592         item.mask = &ipv6_mask;
593         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
594                                              MLX5_MODIFICATION_TYPE_SET, error);
595 }
596
597 /**
598  * Convert modify-header set MAC address action to DV specification.
599  *
600  * @param[in,out] resource
601  *   Pointer to the modify-header resource.
602  * @param[in] action
603  *   Pointer to action specification.
604  * @param[out] error
605  *   Pointer to the error structure.
606  *
607  * @return
608  *   0 on success, a negative errno value otherwise and rte_errno is set.
609  */
610 static int
611 flow_dv_convert_action_modify_mac
612                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
613                          const struct rte_flow_action *action,
614                          struct rte_flow_error *error)
615 {
616         const struct rte_flow_action_set_mac *conf =
617                 (const struct rte_flow_action_set_mac *)(action->conf);
618         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
619         struct rte_flow_item_eth eth;
620         struct rte_flow_item_eth eth_mask;
621
622         memset(&eth, 0, sizeof(eth));
623         memset(&eth_mask, 0, sizeof(eth_mask));
624         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
625                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
626                        sizeof(eth.src.addr_bytes));
627                 memcpy(&eth_mask.src.addr_bytes,
628                        &rte_flow_item_eth_mask.src.addr_bytes,
629                        sizeof(eth_mask.src.addr_bytes));
630         } else {
631                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
632                        sizeof(eth.dst.addr_bytes));
633                 memcpy(&eth_mask.dst.addr_bytes,
634                        &rte_flow_item_eth_mask.dst.addr_bytes,
635                        sizeof(eth_mask.dst.addr_bytes));
636         }
637         item.spec = &eth;
638         item.mask = &eth_mask;
639         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
640                                              MLX5_MODIFICATION_TYPE_SET, error);
641 }
642
643 /**
644  * Convert modify-header set VLAN VID action to DV specification.
645  *
646  * @param[in,out] resource
647  *   Pointer to the modify-header resource.
648  * @param[in] action
649  *   Pointer to action specification.
650  * @param[out] error
651  *   Pointer to the error structure.
652  *
653  * @return
654  *   0 on success, a negative errno value otherwise and rte_errno is set.
655  */
656 static int
657 flow_dv_convert_action_modify_vlan_vid
658                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
659                          const struct rte_flow_action *action,
660                          struct rte_flow_error *error)
661 {
662         const struct rte_flow_action_of_set_vlan_vid *conf =
663                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
664         int i = resource->actions_num;
665         struct mlx5_modification_cmd *actions = resource->actions;
666         struct field_modify_info *field = modify_vlan_out_first_vid;
667
668         if (i >= MLX5_MAX_MODIFY_NUM)
669                 return rte_flow_error_set(error, EINVAL,
670                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
671                          "too many items to modify");
672         actions[i] = (struct mlx5_modification_cmd) {
673                 .action_type = MLX5_MODIFICATION_TYPE_SET,
674                 .field = field->id,
675                 .length = field->size,
676                 .offset = field->offset,
677         };
678         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
679         actions[i].data1 = conf->vlan_vid;
680         actions[i].data1 = actions[i].data1 << 16;
681         resource->actions_num = ++i;
682         return 0;
683 }
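/*
 * Note: unlike most commands built in this file, data1 is not converted
 * with rte_cpu_to_be_32() here; conf->vlan_vid is already a big-endian
 * 16-bit value, so shifting it left by 16 appears intended to place it
 * in the first two bytes of the 32-bit data word.
 */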
684
685 /**
686  * Convert modify-header set TP action to DV specification.
687  *
688  * @param[in,out] resource
689  *   Pointer to the modify-header resource.
690  * @param[in] action
691  *   Pointer to action specification.
692  * @param[in] items
693  *   Pointer to rte_flow_item objects list.
694  * @param[in] attr
695  *   Pointer to flow attributes structure.
696  * @param[in] dev_flow
697  *   Pointer to the sub flow.
698  * @param[in] tunnel_decap
699  *   Whether action is after tunnel decapsulation.
700  * @param[out] error
701  *   Pointer to the error structure.
702  *
703  * @return
704  *   0 on success, a negative errno value otherwise and rte_errno is set.
705  */
706 static int
707 flow_dv_convert_action_modify_tp
708                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
709                          const struct rte_flow_action *action,
710                          const struct rte_flow_item *items,
711                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
712                          bool tunnel_decap, struct rte_flow_error *error)
713 {
714         const struct rte_flow_action_set_tp *conf =
715                 (const struct rte_flow_action_set_tp *)(action->conf);
716         struct rte_flow_item item;
717         struct rte_flow_item_udp udp;
718         struct rte_flow_item_udp udp_mask;
719         struct rte_flow_item_tcp tcp;
720         struct rte_flow_item_tcp tcp_mask;
721         struct field_modify_info *field;
722
723         if (!attr->valid)
724                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
725         if (attr->udp) {
726                 memset(&udp, 0, sizeof(udp));
727                 memset(&udp_mask, 0, sizeof(udp_mask));
728                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
729                         udp.hdr.src_port = conf->port;
730                         udp_mask.hdr.src_port =
731                                         rte_flow_item_udp_mask.hdr.src_port;
732                 } else {
733                         udp.hdr.dst_port = conf->port;
734                         udp_mask.hdr.dst_port =
735                                         rte_flow_item_udp_mask.hdr.dst_port;
736                 }
737                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
738                 item.spec = &udp;
739                 item.mask = &udp_mask;
740                 field = modify_udp;
741         } else {
742                 MLX5_ASSERT(attr->tcp);
743                 memset(&tcp, 0, sizeof(tcp));
744                 memset(&tcp_mask, 0, sizeof(tcp_mask));
745                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
746                         tcp.hdr.src_port = conf->port;
747                         tcp_mask.hdr.src_port =
748                                         rte_flow_item_tcp_mask.hdr.src_port;
749                 } else {
750                         tcp.hdr.dst_port = conf->port;
751                         tcp_mask.hdr.dst_port =
752                                         rte_flow_item_tcp_mask.hdr.dst_port;
753                 }
754                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
755                 item.spec = &tcp;
756                 item.mask = &tcp_mask;
757                 field = modify_tcp;
758         }
759         return flow_dv_convert_modify_action(&item, field, NULL, resource,
760                                              MLX5_MODIFICATION_TYPE_SET, error);
761 }
762
763 /**
764  * Convert modify-header set TTL action to DV specification.
765  *
766  * @param[in,out] resource
767  *   Pointer to the modify-header resource.
768  * @param[in] action
769  *   Pointer to action specification.
770  * @param[in] items
771  *   Pointer to rte_flow_item objects list.
772  * @param[in] attr
773  *   Pointer to flow attributes structure.
774  * @param[in] dev_flow
775  *   Pointer to the sub flow.
776  * @param[in] tunnel_decap
777  *   Whether action is after tunnel decapsulation.
778  * @param[out] error
779  *   Pointer to the error structure.
780  *
781  * @return
782  *   0 on success, a negative errno value otherwise and rte_errno is set.
783  */
784 static int
785 flow_dv_convert_action_modify_ttl
786                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
787                          const struct rte_flow_action *action,
788                          const struct rte_flow_item *items,
789                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
790                          bool tunnel_decap, struct rte_flow_error *error)
791 {
792         const struct rte_flow_action_set_ttl *conf =
793                 (const struct rte_flow_action_set_ttl *)(action->conf);
794         struct rte_flow_item item;
795         struct rte_flow_item_ipv4 ipv4;
796         struct rte_flow_item_ipv4 ipv4_mask;
797         struct rte_flow_item_ipv6 ipv6;
798         struct rte_flow_item_ipv6 ipv6_mask;
799         struct field_modify_info *field;
800
801         if (!attr->valid)
802                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
803         if (attr->ipv4) {
804                 memset(&ipv4, 0, sizeof(ipv4));
805                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
806                 ipv4.hdr.time_to_live = conf->ttl_value;
807                 ipv4_mask.hdr.time_to_live = 0xFF;
808                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
809                 item.spec = &ipv4;
810                 item.mask = &ipv4_mask;
811                 field = modify_ipv4;
812         } else {
813                 MLX5_ASSERT(attr->ipv6);
814                 memset(&ipv6, 0, sizeof(ipv6));
815                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
816                 ipv6.hdr.hop_limits = conf->ttl_value;
817                 ipv6_mask.hdr.hop_limits = 0xFF;
818                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
819                 item.spec = &ipv6;
820                 item.mask = &ipv6_mask;
821                 field = modify_ipv6;
822         }
823         return flow_dv_convert_modify_action(&item, field, NULL, resource,
824                                              MLX5_MODIFICATION_TYPE_SET, error);
825 }
826
827 /**
828  * Convert modify-header decrement TTL action to DV specification.
829  *
830  * @param[in,out] resource
831  *   Pointer to the modify-header resource.
832  * @param[in] action
833  *   Pointer to action specification.
834  * @param[in] items
835  *   Pointer to rte_flow_item objects list.
836  * @param[in] attr
837  *   Pointer to flow attributes structure.
838  * @param[in] dev_flow
839  *   Pointer to the sub flow.
840  * @param[in] tunnel_decap
841  *   Whether action is after tunnel decapsulation.
842  * @param[out] error
843  *   Pointer to the error structure.
844  *
845  * @return
846  *   0 on success, a negative errno value otherwise and rte_errno is set.
847  */
848 static int
849 flow_dv_convert_action_modify_dec_ttl
850                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
851                          const struct rte_flow_item *items,
852                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
853                          bool tunnel_decap, struct rte_flow_error *error)
854 {
855         struct rte_flow_item item;
856         struct rte_flow_item_ipv4 ipv4;
857         struct rte_flow_item_ipv4 ipv4_mask;
858         struct rte_flow_item_ipv6 ipv6;
859         struct rte_flow_item_ipv6 ipv6_mask;
860         struct field_modify_info *field;
861
862         if (!attr->valid)
863                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
864         if (attr->ipv4) {
865                 memset(&ipv4, 0, sizeof(ipv4));
866                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
867                 ipv4.hdr.time_to_live = 0xFF;
868                 ipv4_mask.hdr.time_to_live = 0xFF;
869                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
870                 item.spec = &ipv4;
871                 item.mask = &ipv4_mask;
872                 field = modify_ipv4;
873         } else {
874                 MLX5_ASSERT(attr->ipv6);
875                 memset(&ipv6, 0, sizeof(ipv6));
876                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
877                 ipv6.hdr.hop_limits = 0xFF;
878                 ipv6_mask.hdr.hop_limits = 0xFF;
879                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
880                 item.spec = &ipv6;
881                 item.mask = &ipv6_mask;
882                 field = modify_ipv6;
883         }
884         return flow_dv_convert_modify_action(&item, field, NULL, resource,
885                                              MLX5_MODIFICATION_TYPE_ADD, error);
886 }
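/*
 * Note on the decrement above: the spec TTL/hop-limit of 0xFF is applied
 * with MLX5_MODIFICATION_TYPE_ADD, and adding 255 to an 8-bit field is
 * congruent to subtracting 1 modulo 256, which implements DEC_TTL.
 */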
887
888 /**
889  * Convert modify-header increment/decrement TCP Sequence number
890  * to DV specification.
891  *
892  * @param[in,out] resource
893  *   Pointer to the modify-header resource.
894  * @param[in] action
895  *   Pointer to action specification.
896  * @param[out] error
897  *   Pointer to the error structure.
898  *
899  * @return
900  *   0 on success, a negative errno value otherwise and rte_errno is set.
901  */
902 static int
903 flow_dv_convert_action_modify_tcp_seq
904                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
905                          const struct rte_flow_action *action,
906                          struct rte_flow_error *error)
907 {
908         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
909         uint64_t value = rte_be_to_cpu_32(*conf);
910         struct rte_flow_item item;
911         struct rte_flow_item_tcp tcp;
912         struct rte_flow_item_tcp tcp_mask;
913
914         memset(&tcp, 0, sizeof(tcp));
915         memset(&tcp_mask, 0, sizeof(tcp_mask));
916         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
917                 /*
918                  * The HW has no decrement operation, only increment.
919                  * To simulate decrementing Y by X using only increments,
920                  * add UINT32_MAX to Y X times; each addition of
921                  * UINT32_MAX decrements Y by 1.
922                  */
923                 value *= UINT32_MAX;
924         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
925         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
926         item.type = RTE_FLOW_ITEM_TYPE_TCP;
927         item.spec = &tcp;
928         item.mask = &tcp_mask;
929         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
930                                              MLX5_MODIFICATION_TYPE_ADD, error);
931 }
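/*
 * Worked example of the decrement emulation above: to decrement a sequence
 * number Y by 3, value becomes 3 * UINT32_MAX, which truncates to -3 in
 * 32 bits, so the hardware addition yields Y - 3.
 */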
932
933 /**
934  * Convert modify-header increment/decrement TCP Acknowledgment number
935  * to DV specification.
936  *
937  * @param[in,out] resource
938  *   Pointer to the modify-header resource.
939  * @param[in] action
940  *   Pointer to action specification.
941  * @param[out] error
942  *   Pointer to the error structure.
943  *
944  * @return
945  *   0 on success, a negative errno value otherwise and rte_errno is set.
946  */
947 static int
948 flow_dv_convert_action_modify_tcp_ack
949                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
950                          const struct rte_flow_action *action,
951                          struct rte_flow_error *error)
952 {
953         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
954         uint64_t value = rte_be_to_cpu_32(*conf);
955         struct rte_flow_item item;
956         struct rte_flow_item_tcp tcp;
957         struct rte_flow_item_tcp tcp_mask;
958
959         memset(&tcp, 0, sizeof(tcp));
960         memset(&tcp_mask, 0, sizeof(tcp_mask));
961         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
962                 /*
963                  * The HW has no decrement operation, only increment.
964                  * To simulate decrementing Y by X using only increments,
965                  * add UINT32_MAX to Y X times; each addition of
966                  * UINT32_MAX decrements Y by 1.
967                  */
968                 value *= UINT32_MAX;
969         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
970         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
971         item.type = RTE_FLOW_ITEM_TYPE_TCP;
972         item.spec = &tcp;
973         item.mask = &tcp_mask;
974         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
975                                              MLX5_MODIFICATION_TYPE_ADD, error);
976 }
977
978 static enum mlx5_modification_field reg_to_field[] = {
979         [REG_NON] = MLX5_MODI_OUT_NONE,
980         [REG_A] = MLX5_MODI_META_DATA_REG_A,
981         [REG_B] = MLX5_MODI_META_DATA_REG_B,
982         [REG_C_0] = MLX5_MODI_META_REG_C_0,
983         [REG_C_1] = MLX5_MODI_META_REG_C_1,
984         [REG_C_2] = MLX5_MODI_META_REG_C_2,
985         [REG_C_3] = MLX5_MODI_META_REG_C_3,
986         [REG_C_4] = MLX5_MODI_META_REG_C_4,
987         [REG_C_5] = MLX5_MODI_META_REG_C_5,
988         [REG_C_6] = MLX5_MODI_META_REG_C_6,
989         [REG_C_7] = MLX5_MODI_META_REG_C_7,
990 };
991
992 /**
993  * Convert register set to DV specification.
994  *
995  * @param[in,out] resource
996  *   Pointer to the modify-header resource.
997  * @param[in] action
998  *   Pointer to action specification.
999  * @param[out] error
1000  *   Pointer to the error structure.
1001  *
1002  * @return
1003  *   0 on success, a negative errno value otherwise and rte_errno is set.
1004  */
1005 static int
1006 flow_dv_convert_action_set_reg
1007                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1008                          const struct rte_flow_action *action,
1009                          struct rte_flow_error *error)
1010 {
1011         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
1012         struct mlx5_modification_cmd *actions = resource->actions;
1013         uint32_t i = resource->actions_num;
1014
1015         if (i >= MLX5_MAX_MODIFY_NUM)
1016                 return rte_flow_error_set(error, EINVAL,
1017                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1018                                           "too many items to modify");
1019         MLX5_ASSERT(conf->id != REG_NON);
1020         MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
1021         actions[i] = (struct mlx5_modification_cmd) {
1022                 .action_type = MLX5_MODIFICATION_TYPE_SET,
1023                 .field = reg_to_field[conf->id],
1024                 .offset = conf->offset,
1025                 .length = conf->length,
1026         };
1027         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1028         actions[i].data1 = rte_cpu_to_be_32(conf->data);
1029         ++i;
1030         resource->actions_num = i;
1031         return 0;
1032 }
1033
1034 /**
1035  * Convert SET_TAG action to DV specification.
1036  *
1037  * @param[in] dev
1038  *   Pointer to the rte_eth_dev structure.
1039  * @param[in,out] resource
1040  *   Pointer to the modify-header resource.
1041  * @param[in] conf
1042  *   Pointer to action specification.
1043  * @param[out] error
1044  *   Pointer to the error structure.
1045  *
1046  * @return
1047  *   0 on success, a negative errno value otherwise and rte_errno is set.
1048  */
1049 static int
1050 flow_dv_convert_action_set_tag
1051                         (struct rte_eth_dev *dev,
1052                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1053                          const struct rte_flow_action_set_tag *conf,
1054                          struct rte_flow_error *error)
1055 {
1056         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1057         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1058         struct rte_flow_item item = {
1059                 .spec = &data,
1060                 .mask = &mask,
1061         };
1062         struct field_modify_info reg_c_x[] = {
1063                 [1] = {0, 0, 0},
1064         };
1065         enum mlx5_modification_field reg_type;
1066         int ret;
1067
1068         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1069         if (ret < 0)
1070                 return ret;
1071         MLX5_ASSERT(ret != REG_NON);
1072         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1073         reg_type = reg_to_field[ret];
1074         MLX5_ASSERT(reg_type > 0);
1075         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1076         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1077                                              MLX5_MODIFICATION_TYPE_SET, error);
1078 }
1079
1080 /**
1081  * Convert internal COPY_REG action to DV specification.
1082  *
1083  * @param[in] dev
1084  *   Pointer to the rte_eth_dev structure.
1085  * @param[in,out] res
1086  *   Pointer to the modify-header resource.
1087  * @param[in] action
1088  *   Pointer to action specification.
1089  * @param[out] error
1090  *   Pointer to the error structure.
1091  *
1092  * @return
1093  *   0 on success, a negative errno value otherwise and rte_errno is set.
1094  */
1095 static int
1096 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
1097                                  struct mlx5_flow_dv_modify_hdr_resource *res,
1098                                  const struct rte_flow_action *action,
1099                                  struct rte_flow_error *error)
1100 {
1101         const struct mlx5_flow_action_copy_mreg *conf = action->conf;
1102         rte_be32_t mask = RTE_BE32(UINT32_MAX);
1103         struct rte_flow_item item = {
1104                 .spec = NULL,
1105                 .mask = &mask,
1106         };
1107         struct field_modify_info reg_src[] = {
1108                 {4, 0, reg_to_field[conf->src]},
1109                 {0, 0, 0},
1110         };
1111         struct field_modify_info reg_dst = {
1112                 .offset = 0,
1113                 .id = reg_to_field[conf->dst],
1114         };
1115         /* Adjust reg_c[0] usage according to reported mask. */
1116         if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1117                 struct mlx5_priv *priv = dev->data->dev_private;
1118                 uint32_t reg_c0 = priv->sh->dv_regc0_mask;
1119
1120                 MLX5_ASSERT(reg_c0);
1121                 MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1122                 if (conf->dst == REG_C_0) {
1123                         /* Copy to reg_c[0], within mask only. */
1124                         reg_dst.offset = rte_bsf32(reg_c0);
1125                         /*
1126                          * The mask ignores the endianness because
1127                          * there is no conversion in the datapath.
1128                          */
1129 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1130                         /* Copy from destination lower bits to reg_c[0]. */
1131                         mask = reg_c0 >> reg_dst.offset;
1132 #else
1133                         /* Copy from destination upper bits to reg_c[0]. */
1134                         mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1135                                           rte_fls_u32(reg_c0));
1136 #endif
1137                 } else {
1138                         mask = rte_cpu_to_be_32(reg_c0);
1139 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1140                         /* Copy from reg_c[0] to destination lower bits. */
1141                         reg_dst.offset = 0;
1142 #else
1143                         /* Copy from reg_c[0] to destination upper bits. */
1144                         reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1145                                          (rte_fls_u32(reg_c0) -
1146                                           rte_bsf32(reg_c0));
1147 #endif
1148                 }
1149         }
1150         return flow_dv_convert_modify_action(&item,
1151                                              reg_src, &reg_dst, res,
1152                                              MLX5_MODIFICATION_TYPE_COPY,
1153                                              error);
1154 }
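/*
 * Sketch with a hypothetical reg_c[0] mask of 0x00ff0000 on a little-endian
 * host: copying to REG_C_0 gives reg_dst.offset = rte_bsf32(0x00ff0000) = 16
 * and mask = 0x00ff0000 << (32 - 24) = 0xff000000, restricting the copy to
 * the bits available in reg_c[0].
 */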
1155
1156 /**
1157  * Convert MARK action to DV specification. This routine is used
1158  * in extensive metadata mode only and requires a metadata register
1159  * to be handled. In legacy mode the hardware tag resource is engaged.
1160  *
1161  * @param[in] dev
1162  *   Pointer to the rte_eth_dev structure.
1163  * @param[in] conf
1164  *   Pointer to MARK action specification.
1165  * @param[in,out] resource
1166  *   Pointer to the modify-header resource.
1167  * @param[out] error
1168  *   Pointer to the error structure.
1169  *
1170  * @return
1171  *   0 on success, a negative errno value otherwise and rte_errno is set.
1172  */
1173 static int
1174 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1175                             const struct rte_flow_action_mark *conf,
1176                             struct mlx5_flow_dv_modify_hdr_resource *resource,
1177                             struct rte_flow_error *error)
1178 {
1179         struct mlx5_priv *priv = dev->data->dev_private;
1180         rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1181                                            priv->sh->dv_mark_mask);
1182         rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1183         struct rte_flow_item item = {
1184                 .spec = &data,
1185                 .mask = &mask,
1186         };
1187         struct field_modify_info reg_c_x[] = {
1188                 [1] = {0, 0, 0},
1189         };
1190         int reg;
1191
1192         if (!mask)
1193                 return rte_flow_error_set(error, EINVAL,
1194                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1195                                           NULL, "zero mark action mask");
1196         reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1197         if (reg < 0)
1198                 return reg;
1199         MLX5_ASSERT(reg > 0);
1200         if (reg == REG_C_0) {
1201                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1202                 uint32_t shl_c0 = rte_bsf32(msk_c0);
1203
1204                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1205                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1206                 mask = rte_cpu_to_be_32(mask << shl_c0);
1207         }
1208         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1209         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1210                                              MLX5_MODIFICATION_TYPE_SET, error);
1211 }
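/*
 * Sketch, assuming a hypothetical reg_c[0] mask of 0xffff0000: shl_c0 is
 * rte_bsf32(0xffff0000) = 16, so the mark value and its mask are shifted
 * into the upper half of the register, leaving the reserved lower bits
 * untouched.
 */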
1212
1213 /**
1214  * Get metadata register index for specified steering domain.
1215  *
1216  * @param[in] dev
1217  *   Pointer to the rte_eth_dev structure.
1218  * @param[in] attr
1219  *   Attributes of flow to determine steering domain.
1220  * @param[out] error
1221  *   Pointer to the error structure.
1222  *
1223  * @return
1224  *   positive index on success, a negative errno value otherwise
1225  *   and rte_errno is set.
1226  */
1227 static enum modify_reg
1228 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1229                          const struct rte_flow_attr *attr,
1230                          struct rte_flow_error *error)
1231 {
1232         int reg =
1233                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1234                                           MLX5_METADATA_FDB :
1235                                             attr->egress ?
1236                                             MLX5_METADATA_TX :
1237                                             MLX5_METADATA_RX, 0, error);
1238         if (reg < 0)
1239                 return rte_flow_error_set(error,
1240                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1241                                           NULL, "unavailable "
1242                                           "metadata register");
1243         return reg;
1244 }
1245
1246 /**
1247  * Convert SET_META action to DV specification.
1248  *
1249  * @param[in] dev
1250  *   Pointer to the rte_eth_dev structure.
1251  * @param[in,out] resource
1252  *   Pointer to the modify-header resource.
1253  * @param[in] attr
1254  *   Attributes of flow that includes this item.
1255  * @param[in] conf
1256  *   Pointer to action specification.
1257  * @param[out] error
1258  *   Pointer to the error structure.
1259  *
1260  * @return
1261  *   0 on success, a negative errno value otherwise and rte_errno is set.
1262  */
1263 static int
1264 flow_dv_convert_action_set_meta
1265                         (struct rte_eth_dev *dev,
1266                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1267                          const struct rte_flow_attr *attr,
1268                          const struct rte_flow_action_set_meta *conf,
1269                          struct rte_flow_error *error)
1270 {
1271         uint32_t mask = rte_cpu_to_be_32(conf->mask);
1272         uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
1273         struct rte_flow_item item = {
1274                 .spec = &data,
1275                 .mask = &mask,
1276         };
1277         struct field_modify_info reg_c_x[] = {
1278                 [1] = {0, 0, 0},
1279         };
1280         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1281
1282         if (reg < 0)
1283                 return reg;
1284         MLX5_ASSERT(reg != REG_NON);
1285         if (reg == REG_C_0) {
1286                 struct mlx5_priv *priv = dev->data->dev_private;
1287                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1288                 uint32_t shl_c0 = rte_bsf32(msk_c0);
1289
1290                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1291                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1292                 mask = rte_cpu_to_be_32(mask << shl_c0);
1293         }
1294         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1295         /* The routine expects the parameters in memory in big-endian format. */
1296         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1297                                              MLX5_MODIFICATION_TYPE_SET, error);
1298 }
1299
1300 /**
1301  * Convert modify-header set IPv4 DSCP action to DV specification.
1302  *
1303  * @param[in,out] resource
1304  *   Pointer to the modify-header resource.
1305  * @param[in] action
1306  *   Pointer to action specification.
1307  * @param[out] error
1308  *   Pointer to the error structure.
1309  *
1310  * @return
1311  *   0 on success, a negative errno value otherwise and rte_errno is set.
1312  */
1313 static int
1314 flow_dv_convert_action_modify_ipv4_dscp
1315                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1316                          const struct rte_flow_action *action,
1317                          struct rte_flow_error *error)
1318 {
1319         const struct rte_flow_action_set_dscp *conf =
1320                 (const struct rte_flow_action_set_dscp *)(action->conf);
1321         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1322         struct rte_flow_item_ipv4 ipv4;
1323         struct rte_flow_item_ipv4 ipv4_mask;
1324
1325         memset(&ipv4, 0, sizeof(ipv4));
1326         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1327         ipv4.hdr.type_of_service = conf->dscp;
1328         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1329         item.spec = &ipv4;
1330         item.mask = &ipv4_mask;
1331         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1332                                              MLX5_MODIFICATION_TYPE_SET, error);
1333 }
1334
1335 /**
1336  * Convert modify-header set IPv6 DSCP action to DV specification.
1337  *
1338  * @param[in,out] resource
1339  *   Pointer to the modify-header resource.
1340  * @param[in] action
1341  *   Pointer to action specification.
1342  * @param[out] error
1343  *   Pointer to the error structure.
1344  *
1345  * @return
1346  *   0 on success, a negative errno value otherwise and rte_errno is set.
1347  */
1348 static int
1349 flow_dv_convert_action_modify_ipv6_dscp
1350                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1351                          const struct rte_flow_action *action,
1352                          struct rte_flow_error *error)
1353 {
1354         const struct rte_flow_action_set_dscp *conf =
1355                 (const struct rte_flow_action_set_dscp *)(action->conf);
1356         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1357         struct rte_flow_item_ipv6 ipv6;
1358         struct rte_flow_item_ipv6 ipv6_mask;
1359
1360         memset(&ipv6, 0, sizeof(ipv6));
1361         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1362         /*
1363          * Even though the DSCP bits offset in IPv6 is not byte aligned,
1364          * rdma-core only accepts the DSCP bits byte aligned, starting
1365          * from bit 0 up to bit 5, to be compatible with IPv4. No shift
1366          * is needed in the IPv6 case as rdma-core requires a byte-aligned value.
1367          */
1368         ipv6.hdr.vtc_flow = conf->dscp;
1369         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1370         item.spec = &ipv6;
1371         item.mask = &ipv6_mask;
1372         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1373                                              MLX5_MODIFICATION_TYPE_SET, error);
1374 }
1375
1376 static int
1377 mlx5_flow_item_field_width(struct mlx5_dev_config *config,
1378                            enum rte_flow_field_id field)
1379 {
1380         switch (field) {
1381         case RTE_FLOW_FIELD_START:
1382                 return 32;
1383         case RTE_FLOW_FIELD_MAC_DST:
1384         case RTE_FLOW_FIELD_MAC_SRC:
1385                 return 48;
1386         case RTE_FLOW_FIELD_VLAN_TYPE:
1387                 return 16;
1388         case RTE_FLOW_FIELD_VLAN_ID:
1389                 return 12;
1390         case RTE_FLOW_FIELD_MAC_TYPE:
1391                 return 16;
1392         case RTE_FLOW_FIELD_IPV4_DSCP:
1393                 return 6;
1394         case RTE_FLOW_FIELD_IPV4_TTL:
1395                 return 8;
1396         case RTE_FLOW_FIELD_IPV4_SRC:
1397         case RTE_FLOW_FIELD_IPV4_DST:
1398                 return 32;
1399         case RTE_FLOW_FIELD_IPV6_DSCP:
1400                 return 6;
1401         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1402                 return 8;
1403         case RTE_FLOW_FIELD_IPV6_SRC:
1404         case RTE_FLOW_FIELD_IPV6_DST:
1405                 return 128;
1406         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1407         case RTE_FLOW_FIELD_TCP_PORT_DST:
1408                 return 16;
1409         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1410         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1411                 return 32;
1412         case RTE_FLOW_FIELD_TCP_FLAGS:
1413                 return 9;
1414         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1415         case RTE_FLOW_FIELD_UDP_PORT_DST:
1416                 return 16;
1417         case RTE_FLOW_FIELD_VXLAN_VNI:
1418         case RTE_FLOW_FIELD_GENEVE_VNI:
1419                 return 24;
1420         case RTE_FLOW_FIELD_GTP_TEID:
1421         case RTE_FLOW_FIELD_TAG:
1422                 return 32;
1423         case RTE_FLOW_FIELD_MARK:
1424                 return 24;
1425         case RTE_FLOW_FIELD_META:
1426                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_META16)
1427                         return 16;
1428                 else if (config->dv_xmeta_en == MLX5_XMETA_MODE_META32)
1429                         return 32;
1430                 else
1431                         return 0;
1432         case RTE_FLOW_FIELD_POINTER:
1433         case RTE_FLOW_FIELD_VALUE:
1434                 return 64;
1435         default:
1436                 MLX5_ASSERT(false);
1437         }
1438         return 0;
1439 }
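
/*
 * Worked example (illustrative) for the width returned above: for
 * RTE_FLOW_FIELD_VLAN_ID the width is 12, so a full-field modification
 * in mlx5_flow_field_id_to_modify_info() below computes
 *
 *     rte_cpu_to_be_16(0x0fff >> (12 - 12)) == RTE_BE16(0x0fff)
 *
 * while a partial width of 8 keeps only the low bits:
 * 0x0fff >> 4 == 0x00ff.
 */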
1440
1441 static void
1442 mlx5_flow_field_id_to_modify_info
1443                 (const struct rte_flow_action_modify_data *data,
1444                  struct field_modify_info *info,
1445                  uint32_t *mask, uint32_t *value,
1446                  uint32_t width, uint32_t dst_width,
1447                  struct rte_eth_dev *dev,
1448                  const struct rte_flow_attr *attr,
1449                  struct rte_flow_error *error)
1450 {
1451         struct mlx5_priv *priv = dev->data->dev_private;
1452         struct mlx5_dev_config *config = &priv->config;
1453         uint32_t idx = 0;
1454         uint32_t off = 0;
1455         uint64_t val = 0;
1456         switch (data->field) {
1457         case RTE_FLOW_FIELD_START:
1458                 /* not supported yet */
1459                 MLX5_ASSERT(false);
1460                 break;
1461         case RTE_FLOW_FIELD_MAC_DST:
1462                 off = data->offset > 16 ? data->offset - 16 : 0;
1463                 if (mask) {
1464                         if (data->offset < 16) {
1465                                 info[idx] = (struct field_modify_info){2, 0,
1466                                                 MLX5_MODI_OUT_DMAC_15_0};
1467                                 if (width < 16) {
1468                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1469                                                                  (16 - width));
1470                                         width = 0;
1471                                 } else {
1472                                         mask[idx] = RTE_BE16(0xffff);
1473                                         width -= 16;
1474                                 }
1475                                 if (!width)
1476                                         break;
1477                                 ++idx;
1478                         }
1479                         info[idx] = (struct field_modify_info){4, 4 * idx,
1480                                                 MLX5_MODI_OUT_DMAC_47_16};
1481                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1482                                                       (32 - width)) << off);
1483                 } else {
1484                         if (data->offset < 16)
1485                                 info[idx++] = (struct field_modify_info){2, 0,
1486                                                 MLX5_MODI_OUT_DMAC_15_0};
1487                         info[idx] = (struct field_modify_info){4, off,
1488                                                 MLX5_MODI_OUT_DMAC_47_16};
1489                 }
1490                 break;
1491         case RTE_FLOW_FIELD_MAC_SRC:
1492                 off = data->offset > 16 ? data->offset - 16 : 0;
1493                 if (mask) {
1494                         if (data->offset < 16) {
1495                                 info[idx] = (struct field_modify_info){2, 0,
1496                                                 MLX5_MODI_OUT_SMAC_15_0};
1497                                 if (width < 16) {
1498                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1499                                                                  (16 - width));
1500                                         width = 0;
1501                                 } else {
1502                                         mask[idx] = RTE_BE16(0xffff);
1503                                         width -= 16;
1504                                 }
1505                                 if (!width)
1506                                         break;
1507                                 ++idx;
1508                         }
1509                         info[idx] = (struct field_modify_info){4, 4 * idx,
1510                                                 MLX5_MODI_OUT_SMAC_47_16};
1511                         mask[idx] = rte_cpu_to_be_32((0xffffffff >>
1512                                                       (32 - width)) << off);
1513                 } else {
1514                         if (data->offset < 16)
1515                                 info[idx++] = (struct field_modify_info){2, 0,
1516                                                 MLX5_MODI_OUT_SMAC_15_0};
1517                         info[idx] = (struct field_modify_info){4, off,
1518                                                 MLX5_MODI_OUT_SMAC_47_16};
1519                 }
1520                 break;
1521         case RTE_FLOW_FIELD_VLAN_TYPE:
1522                 /* not supported yet */
1523                 break;
1524         case RTE_FLOW_FIELD_VLAN_ID:
1525                 info[idx] = (struct field_modify_info){2, 0,
1526                                         MLX5_MODI_OUT_FIRST_VID};
1527                 if (mask)
1528                         mask[idx] = rte_cpu_to_be_16(0x0fff >> (12 - width));
1529                 break;
1530         case RTE_FLOW_FIELD_MAC_TYPE:
1531                 info[idx] = (struct field_modify_info){2, 0,
1532                                         MLX5_MODI_OUT_ETHERTYPE};
1533                 if (mask)
1534                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1535                 break;
1536         case RTE_FLOW_FIELD_IPV4_DSCP:
1537                 info[idx] = (struct field_modify_info){1, 0,
1538                                         MLX5_MODI_OUT_IP_DSCP};
1539                 if (mask)
1540                         mask[idx] = 0x3f >> (6 - width);
1541                 break;
1542         case RTE_FLOW_FIELD_IPV4_TTL:
1543                 info[idx] = (struct field_modify_info){1, 0,
1544                                         MLX5_MODI_OUT_IPV4_TTL};
1545                 if (mask)
1546                         mask[idx] = 0xff >> (8 - width);
1547                 break;
1548         case RTE_FLOW_FIELD_IPV4_SRC:
1549                 info[idx] = (struct field_modify_info){4, 0,
1550                                         MLX5_MODI_OUT_SIPV4};
1551                 if (mask)
1552                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1553                                                      (32 - width));
1554                 break;
1555         case RTE_FLOW_FIELD_IPV4_DST:
1556                 info[idx] = (struct field_modify_info){4, 0,
1557                                         MLX5_MODI_OUT_DIPV4};
1558                 if (mask)
1559                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1560                                                      (32 - width));
1561                 break;
1562         case RTE_FLOW_FIELD_IPV6_DSCP:
1563                 info[idx] = (struct field_modify_info){1, 0,
1564                                         MLX5_MODI_OUT_IP_DSCP};
1565                 if (mask)
1566                         mask[idx] = 0x3f >> (6 - width);
1567                 break;
1568         case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
1569                 info[idx] = (struct field_modify_info){1, 0,
1570                                         MLX5_MODI_OUT_IPV6_HOPLIMIT};
1571                 if (mask)
1572                         mask[idx] = 0xff >> (8 - width);
1573                 break;
1574         case RTE_FLOW_FIELD_IPV6_SRC:
1575                 if (mask) {
1576                         if (data->offset < 32) {
1577                                 info[idx] = (struct field_modify_info){4,
1578                                                 4 * idx,
1579                                                 MLX5_MODI_OUT_SIPV6_31_0};
1580                                 if (width < 32) {
1581                                         mask[idx] =
1582                                                 rte_cpu_to_be_32(0xffffffff >>
1583                                                                  (32 - width));
1584                                         width = 0;
1585                                 } else {
1586                                         mask[idx] = RTE_BE32(0xffffffff);
1587                                         width -= 32;
1588                                 }
1589                                 if (!width)
1590                                         break;
1591                                 ++idx;
1592                         }
1593                         if (data->offset < 64) {
1594                                 info[idx] = (struct field_modify_info){4,
1595                                                 4 * idx,
1596                                                 MLX5_MODI_OUT_SIPV6_63_32};
1597                                 if (width < 32) {
1598                                         mask[idx] =
1599                                                 rte_cpu_to_be_32(0xffffffff >>
1600                                                                  (32 - width));
1601                                         width = 0;
1602                                 } else {
1603                                         mask[idx] = RTE_BE32(0xffffffff);
1604                                         width -= 32;
1605                                 }
1606                                 if (!width)
1607                                         break;
1608                                 ++idx;
1609                         }
1610                         if (data->offset < 96) {
1611                                 info[idx] = (struct field_modify_info){4,
1612                                                 4 * idx,
1613                                                 MLX5_MODI_OUT_SIPV6_95_64};
1614                                 if (width < 32) {
1615                                         mask[idx] =
1616                                                 rte_cpu_to_be_32(0xffffffff >>
1617                                                                  (32 - width));
1618                                         width = 0;
1619                                 } else {
1620                                         mask[idx] = RTE_BE32(0xffffffff);
1621                                         width -= 32;
1622                                 }
1623                                 if (!width)
1624                                         break;
1625                                 ++idx;
1626                         }
1627                         info[idx] = (struct field_modify_info){4, 4 * idx,
1628                                                 MLX5_MODI_OUT_SIPV6_127_96};
1629                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1630                                                      (32 - width));
1631                 } else {
1632                         if (data->offset < 32)
1633                                 info[idx++] = (struct field_modify_info){4, 0,
1634                                                 MLX5_MODI_OUT_SIPV6_31_0};
1635                         if (data->offset < 64)
1636                                 info[idx++] = (struct field_modify_info){4, 0,
1637                                                 MLX5_MODI_OUT_SIPV6_63_32};
1638                         if (data->offset < 96)
1639                                 info[idx++] = (struct field_modify_info){4, 0,
1640                                                 MLX5_MODI_OUT_SIPV6_95_64};
1641                         if (data->offset < 128)
1642                                 info[idx++] = (struct field_modify_info){4, 0,
1643                                                 MLX5_MODI_OUT_SIPV6_127_96};
1644                 }
1645                 break;
1646         case RTE_FLOW_FIELD_IPV6_DST:
1647                 if (mask) {
1648                         if (data->offset < 32) {
1649                                 info[idx] = (struct field_modify_info){4,
1650                                                 4 * idx,
1651                                                 MLX5_MODI_OUT_DIPV6_31_0};
1652                                 if (width < 32) {
1653                                         mask[idx] =
1654                                                 rte_cpu_to_be_32(0xffffffff >>
1655                                                                  (32 - width));
1656                                         width = 0;
1657                                 } else {
1658                                         mask[idx] = RTE_BE32(0xffffffff);
1659                                         width -= 32;
1660                                 }
1661                                 if (!width)
1662                                         break;
1663                                 ++idx;
1664                         }
1665                         if (data->offset < 64) {
1666                                 info[idx] = (struct field_modify_info){4,
1667                                                 4 * idx,
1668                                                 MLX5_MODI_OUT_DIPV6_63_32};
1669                                 if (width < 32) {
1670                                         mask[idx] =
1671                                                 rte_cpu_to_be_32(0xffffffff >>
1672                                                                  (32 - width));
1673                                         width = 0;
1674                                 } else {
1675                                         mask[idx] = RTE_BE32(0xffffffff);
1676                                         width -= 32;
1677                                 }
1678                                 if (!width)
1679                                         break;
1680                                 ++idx;
1681                         }
1682                         if (data->offset < 96) {
1683                                 info[idx] = (struct field_modify_info){4,
1684                                                 4 * idx,
1685                                                 MLX5_MODI_OUT_DIPV6_95_64};
1686                                 if (width < 32) {
1687                                         mask[idx] =
1688                                                 rte_cpu_to_be_32(0xffffffff >>
1689                                                                  (32 - width));
1690                                         width = 0;
1691                                 } else {
1692                                         mask[idx] = RTE_BE32(0xffffffff);
1693                                         width -= 32;
1694                                 }
1695                                 if (!width)
1696                                         break;
1697                                 ++idx;
1698                         }
1699                         info[idx] = (struct field_modify_info){4, 4 * idx,
1700                                                 MLX5_MODI_OUT_DIPV6_127_96};
1701                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1702                                                      (32 - width));
1703                 } else {
1704                         if (data->offset < 32)
1705                                 info[idx++] = (struct field_modify_info){4, 0,
1706                                                 MLX5_MODI_OUT_DIPV6_31_0};
1707                         if (data->offset < 64)
1708                                 info[idx++] = (struct field_modify_info){4, 0,
1709                                                 MLX5_MODI_OUT_DIPV6_63_32};
1710                         if (data->offset < 96)
1711                                 info[idx++] = (struct field_modify_info){4, 0,
1712                                                 MLX5_MODI_OUT_DIPV6_95_64};
1713                         if (data->offset < 128)
1714                                 info[idx++] = (struct field_modify_info){4, 0,
1715                                                 MLX5_MODI_OUT_DIPV6_127_96};
1716                 }
1717                 break;
1718         case RTE_FLOW_FIELD_TCP_PORT_SRC:
1719                 info[idx] = (struct field_modify_info){2, 0,
1720                                         MLX5_MODI_OUT_TCP_SPORT};
1721                 if (mask)
1722                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1723                 break;
1724         case RTE_FLOW_FIELD_TCP_PORT_DST:
1725                 info[idx] = (struct field_modify_info){2, 0,
1726                                         MLX5_MODI_OUT_TCP_DPORT};
1727                 if (mask)
1728                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1729                 break;
1730         case RTE_FLOW_FIELD_TCP_SEQ_NUM:
1731                 info[idx] = (struct field_modify_info){4, 0,
1732                                         MLX5_MODI_OUT_TCP_SEQ_NUM};
1733                 if (mask)
1734                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1735                                                      (32 - width));
1736                 break;
1737         case RTE_FLOW_FIELD_TCP_ACK_NUM:
1738                 info[idx] = (struct field_modify_info){4, 0,
1739                                         MLX5_MODI_OUT_TCP_ACK_NUM};
1740                 if (mask)
1741                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1742                                                      (32 - width));
1743                 break;
1744         case RTE_FLOW_FIELD_TCP_FLAGS:
1745                 info[idx] = (struct field_modify_info){2, 0,
1746                                         MLX5_MODI_OUT_TCP_FLAGS};
1747                 if (mask)
1748                         mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
1749                 break;
1750         case RTE_FLOW_FIELD_UDP_PORT_SRC:
1751                 info[idx] = (struct field_modify_info){2, 0,
1752                                         MLX5_MODI_OUT_UDP_SPORT};
1753                 if (mask)
1754                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1755                 break;
1756         case RTE_FLOW_FIELD_UDP_PORT_DST:
1757                 info[idx] = (struct field_modify_info){2, 0,
1758                                         MLX5_MODI_OUT_UDP_DPORT};
1759                 if (mask)
1760                         mask[idx] = rte_cpu_to_be_16(0xffff >> (16 - width));
1761                 break;
1762         case RTE_FLOW_FIELD_VXLAN_VNI:
1763                 /* not supported yet */
1764                 break;
1765         case RTE_FLOW_FIELD_GENEVE_VNI:
1766                 /* not supported yet */
1767                 break;
1768         case RTE_FLOW_FIELD_GTP_TEID:
1769                 info[idx] = (struct field_modify_info){4, 0,
1770                                         MLX5_MODI_GTP_TEID};
1771                 if (mask)
1772                         mask[idx] = rte_cpu_to_be_32(0xffffffff >>
1773                                                      (32 - width));
1774                 break;
1775         case RTE_FLOW_FIELD_TAG:
1776                 {
1777                         int reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,
1778                                                    data->level, error);
1779                         if (reg < 0)
1780                                 return;
1781                         MLX5_ASSERT(reg != REG_NON);
1782                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1783                         info[idx] = (struct field_modify_info){4, 0,
1784                                                 reg_to_field[reg]};
1785                         if (mask)
1786                                 mask[idx] =
1787                                         rte_cpu_to_be_32(0xffffffff >>
1788                                                          (32 - width));
1789                 }
1790                 break;
1791         case RTE_FLOW_FIELD_MARK:
1792                 {
1793                         int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK,
1794                                                        0, error);
1795                         if (reg < 0)
1796                                 return;
1797                         MLX5_ASSERT(reg != REG_NON);
1798                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1799                         info[idx] = (struct field_modify_info){4, 0,
1800                                                 reg_to_field[reg]};
1801                         if (mask)
1802                                 mask[idx] =
1803                                         rte_cpu_to_be_32(0xffffffff >>
1804                                                          (32 - width));
1805                 }
1806                 break;
1807         case RTE_FLOW_FIELD_META:
1808                 {
1809                         unsigned int xmeta = config->dv_xmeta_en;
1810                         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1811                         if (reg < 0)
1812                                 return;
1813                         MLX5_ASSERT(reg != REG_NON);
1814                         MLX5_ASSERT((unsigned int)reg < RTE_DIM(reg_to_field));
1815                         if (xmeta == MLX5_XMETA_MODE_META16) {
1816                                 info[idx] = (struct field_modify_info){2, 0,
1817                                                         reg_to_field[reg]};
1818                                 if (mask)
1819                                         mask[idx] = rte_cpu_to_be_16(0xffff >>
1820                                                                 (16 - width));
1821                         } else if (xmeta == MLX5_XMETA_MODE_META32) {
1822                                 info[idx] = (struct field_modify_info){4, 0,
1823                                                         reg_to_field[reg]};
1824                                 if (mask)
1825                                         mask[idx] =
1826                                                 rte_cpu_to_be_32(0xffffffff >>
1827                                                                 (32 - width));
1828                         } else {
1829                                 MLX5_ASSERT(false);
1830                         }
1831                 }
1832                 break;
1833         case RTE_FLOW_FIELD_POINTER:
1834         case RTE_FLOW_FIELD_VALUE:
1835                 if (data->field == RTE_FLOW_FIELD_POINTER)
1836                         memcpy(&val, (void *)(uintptr_t)data->value,
1837                                sizeof(uint64_t));
1838                 else
1839                         val = data->value;
1840                 for (idx = 0; idx < MLX5_ACT_MAX_MOD_FIELDS; idx++) {
1841                         if (mask[idx]) {
1842                                 if (dst_width == 48) {
1843                                         /* special case for MAC addresses */
1844                                         value[idx] = rte_cpu_to_be_16(val);
1845                                         val >>= 16;
1846                                         dst_width -= 16;
1847                                 } else if (dst_width > 16) {
1848                                         value[idx] = rte_cpu_to_be_32(val);
1849                                         val >>= 32;
1850                                 } else if (dst_width > 8) {
1851                                         value[idx] = rte_cpu_to_be_16(val);
1852                                         val >>= 16;
1853                                 } else {
1854                                         value[idx] = (uint8_t)val;
1855                                         val >>= 8;
1856                                 }
1857                                 if (!val)
1858                                         break;
1859                         }
1860                 }
1861                 break;
1862         default:
1863                 MLX5_ASSERT(false);
1864                 break;
1865         }
1866 }
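
/*
 * Worked example (illustrative) for the MAC_DST handling above: a full
 * 48-bit destination MAC modification (offset = 0, width = 48) is split
 * into two hardware fields: a 2-byte MLX5_MODI_OUT_DMAC_15_0 entry with
 * mask 0xffff (width drops to 32), then a 4-byte MLX5_MODI_OUT_DMAC_47_16
 * entry with mask 0xffffffff. A partial write with offset = 16 and
 * width = 32 skips the low word entirely and touches only DMAC_47_16.
 */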
1867
1868 /**
1869  * Convert modify_field action to DV specification.
1870  *
1871  * @param[in] dev
1872  *   Pointer to the rte_eth_dev structure.
1873  * @param[in,out] resource
1874  *   Pointer to the modify-header resource.
1875  * @param[in] action
1876  *   Pointer to action specification.
1877  * @param[in] attr
1878  *   Attributes of flow that includes this item.
1879  * @param[out] error
1880  *   Pointer to the error structure.
1881  *
1882  * @return
1883  *   0 on success, a negative errno value otherwise and rte_errno is set.
1884  */
1885 static int
1886 flow_dv_convert_action_modify_field
1887                         (struct rte_eth_dev *dev,
1888                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1889                          const struct rte_flow_action *action,
1890                          const struct rte_flow_attr *attr,
1891                          struct rte_flow_error *error)
1892 {
1893         struct mlx5_priv *priv = dev->data->dev_private;
1894         struct mlx5_dev_config *config = &priv->config;
1895         const struct rte_flow_action_modify_field *conf =
1896                 (const struct rte_flow_action_modify_field *)(action->conf);
1897         struct rte_flow_item item;
1898         struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1899                                                                 {0, 0, 0} };
1900         struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1901                                                                 {0, 0, 0} };
1902         uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1903         uint32_t value[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0};
1904         uint32_t type;
1905         uint32_t dst_width = mlx5_flow_item_field_width(config,
1906                                                         conf->dst.field);
1907
1908         if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1909                 conf->src.field == RTE_FLOW_FIELD_VALUE) {
1910                 type = MLX5_MODIFICATION_TYPE_SET;
1911                 /* For SET fill the destination field (field) first. */
1912                 mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1913                         value, conf->width, dst_width, dev, attr, error);
1914                 /* Then copy immediate value from source as per mask. */
1915                 mlx5_flow_field_id_to_modify_info(&conf->src, dcopy, mask,
1916                         value, conf->width, dst_width, dev, attr, error);
1917                 item.spec = &value;
1918         } else {
1919                 type = MLX5_MODIFICATION_TYPE_COPY;
1920                 /* For COPY fill the destination field (dcopy) without mask. */
1921                 mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1922                         value, conf->width, dst_width, dev, attr, error);
1923                 /* Then construct the source field (field) with mask. */
1924                 mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1925                         value, conf->width, dst_width, dev, attr, error);
1926         }
1927         item.mask = &mask;
1928         return flow_dv_convert_modify_action(&item,
1929                         field, dcopy, resource, type, error);
1930 }
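
/*
 * Application-side sketch (assumed rte_flow usage, not part of the
 * driver) of a request that reaches the conversion above: setting the
 * IPv4 TTL to an immediate value of 64 could be expressed as
 *
 *     struct rte_flow_action_modify_field conf = {
 *             .operation = RTE_FLOW_MODIFY_SET,
 *             .dst = { .field = RTE_FLOW_FIELD_IPV4_TTL },
 *             .src = { .field = RTE_FLOW_FIELD_VALUE, .value = 64 },
 *             .width = 8,
 *     };
 *
 * Since the source is RTE_FLOW_FIELD_VALUE, the immediate branch above
 * is taken and the result is a single 1-byte SET with mask 0xff.
 */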
1931
1932 /**
1933  * Validate MARK item.
1934  *
1935  * @param[in] dev
1936  *   Pointer to the rte_eth_dev structure.
1937  * @param[in] item
1938  *   Item specification.
1939  * @param[in] attr
1940  *   Attributes of flow that includes this item.
1941  * @param[out] error
1942  *   Pointer to error structure.
1943  *
1944  * @return
1945  *   0 on success, a negative errno value otherwise and rte_errno is set.
1946  */
1947 static int
1948 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1949                            const struct rte_flow_item *item,
1950                            const struct rte_flow_attr *attr __rte_unused,
1951                            struct rte_flow_error *error)
1952 {
1953         struct mlx5_priv *priv = dev->data->dev_private;
1954         struct mlx5_dev_config *config = &priv->config;
1955         const struct rte_flow_item_mark *spec = item->spec;
1956         const struct rte_flow_item_mark *mask = item->mask;
1957         const struct rte_flow_item_mark nic_mask = {
1958                 .id = priv->sh->dv_mark_mask,
1959         };
1960         int ret;
1961
1962         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1963                 return rte_flow_error_set(error, ENOTSUP,
1964                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1965                                           "extended metadata feature"
1966                                           " isn't enabled");
1967         if (!mlx5_flow_ext_mreg_supported(dev))
1968                 return rte_flow_error_set(error, ENOTSUP,
1969                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1970                                           "extended metadata register"
1971                                           " isn't supported");
1972         if (!nic_mask.id)
1973                 return rte_flow_error_set(error, ENOTSUP,
1974                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1975                                           "extended metadata register"
1976                                           " isn't available");
1977         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1978         if (ret < 0)
1979                 return ret;
1980         if (!spec)
1981                 return rte_flow_error_set(error, EINVAL,
1982                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1983                                           item->spec,
1984                                           "data cannot be empty");
1985         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1986                 return rte_flow_error_set(error, EINVAL,
1987                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1988                                           &spec->id,
1989                                           "mark id exceeds the limit");
1990         if (!mask)
1991                 mask = &nic_mask;
1992         if (!mask->id)
1993                 return rte_flow_error_set(error, EINVAL,
1994                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1995                                         "mask cannot be zero");
1996
1997         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1998                                         (const uint8_t *)&nic_mask,
1999                                         sizeof(struct rte_flow_item_mark),
2000                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2001         if (ret < 0)
2002                 return ret;
2003         return 0;
2004 }
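
/*
 * Application-side sketch (illustrative): matching packets whose mark id
 * was set to 42 by another rule could be expressed as
 *
 *     struct rte_flow_item_mark mark_spec = { .id = 42 };
 *     struct rte_flow_item_mark mark_mask = { .id = 0xffffff };
 *     { .type = RTE_FLOW_ITEM_TYPE_MARK,
 *       .spec = &mark_spec, .mask = &mark_mask },
 *
 * The checks above require extended metadata (dv_xmeta_en other than
 * MLX5_XMETA_MODE_LEGACY) and an id within priv->sh->dv_mark_mask.
 */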
2005
2006 /**
2007  * Validate META item.
2008  *
2009  * @param[in] dev
2010  *   Pointer to the rte_eth_dev structure.
2011  * @param[in] item
2012  *   Item specification.
2013  * @param[in] attr
2014  *   Attributes of flow that includes this item.
2015  * @param[out] error
2016  *   Pointer to error structure.
2017  *
2018  * @return
2019  *   0 on success, a negative errno value otherwise and rte_errno is set.
2020  */
2021 static int
2022 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
2023                            const struct rte_flow_item *item,
2024                            const struct rte_flow_attr *attr,
2025                            struct rte_flow_error *error)
2026 {
2027         struct mlx5_priv *priv = dev->data->dev_private;
2028         struct mlx5_dev_config *config = &priv->config;
2029         const struct rte_flow_item_meta *spec = item->spec;
2030         const struct rte_flow_item_meta *mask = item->mask;
2031         struct rte_flow_item_meta nic_mask = {
2032                 .data = UINT32_MAX
2033         };
2034         int reg;
2035         int ret;
2036
2037         if (!spec)
2038                 return rte_flow_error_set(error, EINVAL,
2039                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2040                                           item->spec,
2041                                           "data cannot be empty");
2042         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
2043                 if (!mlx5_flow_ext_mreg_supported(dev))
2044                         return rte_flow_error_set(error, ENOTSUP,
2045                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2046                                           "extended metadata register"
2047                                           " isn't supported");
2048                 reg = flow_dv_get_metadata_reg(dev, attr, error);
2049                 if (reg < 0)
2050                         return reg;
2051                 if (reg == REG_NON)
2052                         return rte_flow_error_set(error, ENOTSUP,
2053                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2054                                         "unavailable extended metadata register");
2055                 if (reg == REG_B)
2056                         return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2058                                           "match on reg_b "
2059                                           "isn't supported");
2060                 if (reg != REG_A)
2061                         nic_mask.data = priv->sh->dv_meta_mask;
2062         } else {
2063                 if (attr->transfer)
2064                         return rte_flow_error_set(error, ENOTSUP,
2065                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2066                                         "extended metadata feature "
2067                                         "should be enabled when "
2068                                         "meta item is requested "
2069                                         "with E-Switch mode");
2070                 if (attr->ingress)
2071                         return rte_flow_error_set(error, ENOTSUP,
2072                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2073                                         "match on metadata for ingress "
2074                                         "is not supported in legacy "
2075                                         "metadata mode");
2076         }
2077         if (!mask)
2078                 mask = &rte_flow_item_meta_mask;
2079         if (!mask->data)
2080                 return rte_flow_error_set(error, EINVAL,
2081                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2082                                         "mask cannot be zero");
2083
2084         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2085                                         (const uint8_t *)&nic_mask,
2086                                         sizeof(struct rte_flow_item_meta),
2087                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2088         return ret;
2089 }
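
/*
 * Application-side sketch (illustrative): matching metadata previously
 * attached to packets (e.g. by a SET_META action) could be expressed as
 *
 *     struct rte_flow_item_meta meta_spec = { .data = 0x1234 };
 *     struct rte_flow_item_meta meta_mask = { .data = 0xffff };
 *     { .type = RTE_FLOW_ITEM_TYPE_META,
 *       .spec = &meta_spec, .mask = &meta_mask },
 *
 * Outside legacy mode the usable bits are bounded by the register that
 * flow_dv_get_metadata_reg() selects: the full 32 bits for REG_A,
 * priv->sh->dv_meta_mask for a REG_C register, while REG_B is rejected
 * above.
 */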
2090
2091 /**
2092  * Validate TAG item.
2093  *
2094  * @param[in] dev
2095  *   Pointer to the rte_eth_dev structure.
2096  * @param[in] item
2097  *   Item specification.
2098  * @param[in] attr
2099  *   Attributes of flow that includes this item.
2100  * @param[out] error
2101  *   Pointer to error structure.
2102  *
2103  * @return
2104  *   0 on success, a negative errno value otherwise and rte_errno is set.
2105  */
2106 static int
2107 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
2108                           const struct rte_flow_item *item,
2109                           const struct rte_flow_attr *attr __rte_unused,
2110                           struct rte_flow_error *error)
2111 {
2112         const struct rte_flow_item_tag *spec = item->spec;
2113         const struct rte_flow_item_tag *mask = item->mask;
2114         const struct rte_flow_item_tag nic_mask = {
2115                 .data = RTE_BE32(UINT32_MAX),
2116                 .index = 0xff,
2117         };
2118         int ret;
2119
2120         if (!mlx5_flow_ext_mreg_supported(dev))
2121                 return rte_flow_error_set(error, ENOTSUP,
2122                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2123                                           "extended metadata register"
2124                                           " isn't supported");
2125         if (!spec)
2126                 return rte_flow_error_set(error, EINVAL,
2127                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2128                                           item->spec,
2129                                           "data cannot be empty");
2130         if (!mask)
2131                 mask = &rte_flow_item_tag_mask;
2132         if (!mask->data)
2133                 return rte_flow_error_set(error, EINVAL,
2134                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2135                                         "mask cannot be zero");
2136
2137         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2138                                         (const uint8_t *)&nic_mask,
2139                                         sizeof(struct rte_flow_item_tag),
2140                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2141         if (ret < 0)
2142                 return ret;
2143         if (mask->index != 0xff)
2144                 return rte_flow_error_set(error, EINVAL,
2145                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
2146                                           "partial mask for tag index"
2147                                           " is not supported");
2148         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
2149         if (ret < 0)
2150                 return ret;
2151         MLX5_ASSERT(ret != REG_NON);
2152         return 0;
2153 }
2154
2155 /**
2156  * Validate port ID (vport) item.
2157  *
2158  * @param[in] dev
2159  *   Pointer to the rte_eth_dev structure.
2160  * @param[in] item
2161  *   Item specification.
2162  * @param[in] attr
2163  *   Attributes of flow that includes this item.
2164  * @param[in] item_flags
2165  *   Bit-field that holds the items detected until now.
2166  * @param[out] error
2167  *   Pointer to error structure.
2168  *
2169  * @return
2170  *   0 on success, a negative errno value otherwise and rte_errno is set.
2171  */
2172 static int
2173 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
2174                               const struct rte_flow_item *item,
2175                               const struct rte_flow_attr *attr,
2176                               uint64_t item_flags,
2177                               struct rte_flow_error *error)
2178 {
2179         const struct rte_flow_item_port_id *spec = item->spec;
2180         const struct rte_flow_item_port_id *mask = item->mask;
2181         const struct rte_flow_item_port_id switch_mask = {
2182                         .id = 0xffffffff,
2183         };
2184         struct mlx5_priv *esw_priv;
2185         struct mlx5_priv *dev_priv;
2186         int ret;
2187
2188         if (!attr->transfer)
2189                 return rte_flow_error_set(error, EINVAL,
2190                                           RTE_FLOW_ERROR_TYPE_ITEM,
2191                                           NULL,
2192                                           "match on port id is valid only"
2193                                           " when transfer flag is enabled");
2194         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
2195                 return rte_flow_error_set(error, ENOTSUP,
2196                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2197                                           "multiple source ports are not"
2198                                           " supported");
2199         if (!mask)
2200                 mask = &switch_mask;
2201         if (mask->id != 0xffffffff)
2202                 return rte_flow_error_set(error, ENOTSUP,
2203                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2204                                            mask,
2205                                            "no support for partial mask on"
2206                                            " \"id\" field");
2207         ret = mlx5_flow_item_acceptable
2208                                 (item, (const uint8_t *)mask,
2209                                  (const uint8_t *)&rte_flow_item_port_id_mask,
2210                                  sizeof(struct rte_flow_item_port_id),
2211                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2212         if (ret)
2213                 return ret;
2214         if (!spec)
2215                 return 0;
2216         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
2217         if (!esw_priv)
2218                 return rte_flow_error_set(error, rte_errno,
2219                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2220                                           "failed to obtain E-Switch info for"
2221                                           " port");
2222         dev_priv = mlx5_dev_to_eswitch_info(dev);
2223         if (!dev_priv)
2224                 return rte_flow_error_set(error, rte_errno,
2225                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2226                                           NULL,
2227                                           "failed to obtain E-Switch info");
2228         if (esw_priv->domain_id != dev_priv->domain_id)
2229                 return rte_flow_error_set(error, EINVAL,
2230                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
2231                                           "cannot match on a port from a"
2232                                           " different E-Switch");
2233         return 0;
2234 }
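
/*
 * Application-side sketch (illustrative): a transfer rule matching
 * traffic coming from DPDK port 1 on the same E-Switch could use
 *
 *     struct rte_flow_attr attr = { .transfer = 1 };
 *     struct rte_flow_item_port_id pid = { .id = 1 };
 *     { .type = RTE_FLOW_ITEM_TYPE_PORT_ID, .spec = &pid },
 *
 * The mask, when given, must be the full 0xffffffff; partial "id" masks
 * are rejected above.
 */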
2235
2236 /**
2237  * Validate VLAN item.
2238  *
2239  * @param[in] item
2240  *   Item specification.
2241  * @param[in] item_flags
2242  *   Bit-field that holds the items detected until now.
2243  * @param[in] dev
2244  *   Ethernet device flow is being created on.
2245  * @param[out] error
2246  *   Pointer to error structure.
2247  *
2248  * @return
2249  *   0 on success, a negative errno value otherwise and rte_errno is set.
2250  */
2251 static int
2252 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
2253                            uint64_t item_flags,
2254                            struct rte_eth_dev *dev,
2255                            struct rte_flow_error *error)
2256 {
2257         const struct rte_flow_item_vlan *mask = item->mask;
2258         const struct rte_flow_item_vlan nic_mask = {
2259                 .tci = RTE_BE16(UINT16_MAX),
2260                 .inner_type = RTE_BE16(UINT16_MAX),
2261                 .has_more_vlan = 1,
2262         };
2263         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2264         int ret;
2265         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
2266                                         MLX5_FLOW_LAYER_INNER_L4) :
2267                                        (MLX5_FLOW_LAYER_OUTER_L3 |
2268                                         MLX5_FLOW_LAYER_OUTER_L4);
2269         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2270                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2271
2272         if (item_flags & vlanm)
2273                 return rte_flow_error_set(error, EINVAL,
2274                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2275                                           "multiple VLAN layers not supported");
2276         else if ((item_flags & l34m) != 0)
2277                 return rte_flow_error_set(error, EINVAL,
2278                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2279                                           "VLAN cannot follow L3/L4 layer");
2280         if (!mask)
2281                 mask = &rte_flow_item_vlan_mask;
2282         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2283                                         (const uint8_t *)&nic_mask,
2284                                         sizeof(struct rte_flow_item_vlan),
2285                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2286         if (ret)
2287                 return ret;
2288         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2289                 struct mlx5_priv *priv = dev->data->dev_private;
2290
2291                 if (priv->vmwa_context) {
2292                         /*
2293                          * A non-NULL context means we have a virtual machine
2294                          * with SR-IOV enabled, and we must create a VLAN
2295                          * interface to make the hypervisor set up the E-Switch
2296                          * vport context correctly. We avoid creating multiple
2297                          * VLAN interfaces, so we cannot support a VLAN tag mask.
2298                          */
2299                         return rte_flow_error_set(error, EINVAL,
2300                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2301                                                   item,
2302                                                   "VLAN tag mask is not"
2303                                                   " supported in virtual"
2304                                                   " environment");
2305                 }
2306         }
2307         return 0;
2308 }
2309
2310 /*
2311  * GTP flags are contained in 1 byte of the format:
2312  * -------------------------------------------
2313  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
2314  * |-----------------------------------------|
2315  * | value | Version | PT | Res | E | S | PN |
2316  * -------------------------------------------
2317  *
2318  * Matching is supported only for GTP flags E, S, PN.
2319  */
2320 #define MLX5_GTP_FLAGS_MASK     0x07
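
/*
 * Example (illustrative): a v_pt_rsv_flags spec of 0x04 with mask 0x04
 * matches packets with the E (extension header) flag set; the version,
 * PT and reserved bits (0xf8) fall outside MLX5_GTP_FLAGS_MASK and
 * cannot be matched.
 */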
2321
2322 /**
2323  * Validate GTP item.
2324  *
2325  * @param[in] dev
2326  *   Pointer to the rte_eth_dev structure.
2327  * @param[in] item
2328  *   Item specification.
2329  * @param[in] item_flags
2330  *   Bit-field that holds the items detected until now.
2331  * @param[out] error
2332  *   Pointer to error structure.
2333  *
2334  * @return
2335  *   0 on success, a negative errno value otherwise and rte_errno is set.
2336  */
2337 static int
2338 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
2339                           const struct rte_flow_item *item,
2340                           uint64_t item_flags,
2341                           struct rte_flow_error *error)
2342 {
2343         struct mlx5_priv *priv = dev->data->dev_private;
2344         const struct rte_flow_item_gtp *spec = item->spec;
2345         const struct rte_flow_item_gtp *mask = item->mask;
2346         const struct rte_flow_item_gtp nic_mask = {
2347                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
2348                 .msg_type = 0xff,
2349                 .teid = RTE_BE32(0xffffffff),
2350         };
2351
2352         if (!priv->config.hca_attr.tunnel_stateless_gtp)
2353                 return rte_flow_error_set(error, ENOTSUP,
2354                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2355                                           "GTP support is not enabled");
2356         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2357                 return rte_flow_error_set(error, ENOTSUP,
2358                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2359                                           "multiple tunnel layers not"
2360                                           " supported");
2361         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2362                 return rte_flow_error_set(error, EINVAL,
2363                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2364                                           "no outer UDP layer found");
2365         if (!mask)
2366                 mask = &rte_flow_item_gtp_mask;
2367         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
2368                 return rte_flow_error_set(error, ENOTSUP,
2369                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2370                                           "Match is supported for GTP"
2371                                           " flags only");
2372         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2373                                          (const uint8_t *)&nic_mask,
2374                                          sizeof(struct rte_flow_item_gtp),
2375                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2376 }
2377
2378 /**
2379  * Validate GTP PSC item.
2380  *
2381  * @param[in] item
2382  *   Item specification.
2383  * @param[in] last_item
2384  *   Bit-field of previously validated pattern items.
2385  * @param[in] gtp_item
2386  *   Previous GTP item specification.
2387  * @param[in] attr
2388  *   Pointer to flow attributes.
2389  * @param[out] error
2390  *   Pointer to error structure.
2391  *
2392  * @return
2393  *   0 on success, a negative errno value otherwise and rte_errno is set.
2394  */
2395 static int
2396 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
2397                               uint64_t last_item,
2398                               const struct rte_flow_item *gtp_item,
2399                               const struct rte_flow_attr *attr,
2400                               struct rte_flow_error *error)
2401 {
2402         const struct rte_flow_item_gtp *gtp_spec;
2403         const struct rte_flow_item_gtp *gtp_mask;
2404         const struct rte_flow_item_gtp_psc *spec;
2405         const struct rte_flow_item_gtp_psc *mask;
2406         const struct rte_flow_item_gtp_psc nic_mask = {
2407                 .pdu_type = 0xFF,
2408                 .qfi = 0xFF,
2409         };
2410
2411         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
2412                 return rte_flow_error_set
2413                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2414                          "GTP PSC item must be preceded by a GTP item");
2415         gtp_spec = gtp_item->spec;
2416         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
2417         /* A GTP spec is present and its E flag is requested to match zero. */
2418         if (gtp_spec &&
2419                 (gtp_mask->v_pt_rsv_flags &
2420                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
2421                 return rte_flow_error_set
2422                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2423                          "GTP E flag must be 1 to match GTP PSC");
2424         /* Check the flow is not created in group zero. */
2425         if (!attr->transfer && !attr->group)
2426                 return rte_flow_error_set
2427                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2428                          "GTP PSC is not supported for group 0");
2429         /* Nothing more to validate if there is no GTP PSC spec. */
2430         if (!item->spec)
2431                 return 0;
2432         spec = item->spec;
2433         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
2434         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
2435                 return rte_flow_error_set
2436                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2437                          "PDU type should be smaller than 16");
2438         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2439                                          (const uint8_t *)&nic_mask,
2440                                          sizeof(struct rte_flow_item_gtp_psc),
2441                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2442 }
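
/*
 * Application-side sketch (illustrative): a GTP PSC match must follow a
 * GTP item that does not force the E flag to zero, e.g.
 *
 *     struct rte_flow_item_gtp gtp_spec = { .v_pt_rsv_flags = 0x04 };
 *     struct rte_flow_item_gtp_psc psc_spec = { .qfi = 9 };
 *     ... { .type = RTE_FLOW_ITEM_TYPE_GTP, .spec = &gtp_spec },
 *         { .type = RTE_FLOW_ITEM_TYPE_GTP_PSC, .spec = &psc_spec } ...
 *
 * and must be placed in a non-zero group or a transfer rule; group 0 is
 * rejected above.
 */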
2443
2444 /**
2445  * Validate IPV4 item.
2446  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
2447  * Use the existing validation function mlx5_flow_validate_item_ipv4() and
2448  * add specific validation of the fragment_offset field.
2449  * @param[in] item
2450  *   Item specification.
2451  * @param[in] item_flags
2452  *   Bit-field that holds the items detected until now.
2453  * @param[out] error
2454  *   Pointer to error structure.
2455  *
2456  * @return
2457  *   0 on success, a negative errno value otherwise and rte_errno is set.
2458  */
2459 static int
2460 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
2461                            uint64_t item_flags,
2462                            uint64_t last_item,
2463                            uint16_t ether_type,
2464                            struct rte_flow_error *error)
2465 {
2466         int ret;
2467         const struct rte_flow_item_ipv4 *spec = item->spec;
2468         const struct rte_flow_item_ipv4 *last = item->last;
2469         const struct rte_flow_item_ipv4 *mask = item->mask;
2470         rte_be16_t fragment_offset_spec = 0;
2471         rte_be16_t fragment_offset_last = 0;
2472         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
2473                 .hdr = {
2474                         .src_addr = RTE_BE32(0xffffffff),
2475                         .dst_addr = RTE_BE32(0xffffffff),
2476                         .type_of_service = 0xff,
2477                         .fragment_offset = RTE_BE16(0xffff),
2478                         .next_proto_id = 0xff,
2479                         .time_to_live = 0xff,
2480                 },
2481         };
2482
2483         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
2484                                            ether_type, &nic_ipv4_mask,
2485                                            MLX5_ITEM_RANGE_ACCEPTED, error);
2486         if (ret < 0)
2487                 return ret;
2488         if (spec && mask)
2489                 fragment_offset_spec = spec->hdr.fragment_offset &
2490                                        mask->hdr.fragment_offset;
2491         if (!fragment_offset_spec)
2492                 return 0;
2493         /*
2494          * spec and mask are valid, enforce using full mask to make sure the
2495          * complete value is used correctly.
2496          */
2497         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2498                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2499                 return rte_flow_error_set(error, EINVAL,
2500                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2501                                           item, "must use full mask for"
2502                                           " fragment_offset");
2503         /*
2504          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
2505          * indicating this is the 1st fragment of a fragmented packet.
2506          * This is not yet supported in MLX5, return appropriate error message.
2507          */
2508         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
2509                 return rte_flow_error_set(error, ENOTSUP,
2510                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2511                                           "match on first fragment not "
2512                                           "supported");
2513         if (fragment_offset_spec && !last)
2514                 return rte_flow_error_set(error, ENOTSUP,
2515                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2516                                           "specified value not supported");
2517         /* spec and last are valid, validate the specified range. */
2518         fragment_offset_last = last->hdr.fragment_offset &
2519                                mask->hdr.fragment_offset;
2520         /*
2521          * Match on fragment_offset spec 0x2001 and last 0x3fff
2522          * means MF is 1 and frag-offset is > 0.
2523          * Such a packet is the 2nd or a later fragment, excluding the last.
2524          * This is not yet supported in MLX5, return appropriate
2525          * error message.
2526          */
2527         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
2528             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
2529                 return rte_flow_error_set(error, ENOTSUP,
2530                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2531                                           last, "match on following "
2532                                           "fragments not supported");
2533         /*
2534          * Match on fragment_offset spec 0x0001 and last 0x1fff
2535          * means MF is 0 and frag-offset is > 0.
2536          * Such a packet is the last fragment of a fragmented packet.
2537          * This is not yet supported in MLX5, return appropriate
2538          * error message.
2539          */
2540         if (fragment_offset_spec == RTE_BE16(1) &&
2541             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
2542                 return rte_flow_error_set(error, ENOTSUP,
2543                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2544                                           last, "match on last "
2545                                           "fragment not supported");
2546         /*
2547          * Match on fragment_offset spec 0x0001 and last 0x3fff
2548          * means MF and/or frag-offset is not 0.
2549          * This is a fragmented packet.
2550          * Other range values are invalid and rejected.
2551          */
2552         if (!(fragment_offset_spec == RTE_BE16(1) &&
2553               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
2554                 return rte_flow_error_set(error, ENOTSUP,
2555                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2556                                           "specified range not supported");
2557         return 0;
2558 }
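
/*
 * Editorial sketch, not part of the driver: per the checks above, the only
 * accepted fragment_offset range is spec 0x0001 with last 0x3fff under a
 * full 0x3fff mask, i.e. "MF and/or frag-offset is non-zero", matching any
 * fragment of a fragmented packet:
 *
 *	struct rte_flow_item_ipv4 frag_spec = {
 *		.hdr.fragment_offset = RTE_BE16(1),
 *	};
 *	struct rte_flow_item_ipv4 frag_last = {
 *		.hdr.fragment_offset = RTE_BE16(0x3fff),
 *	};
 *	struct rte_flow_item_ipv4 frag_mask = {
 *		.hdr.fragment_offset = RTE_BE16(0x3fff),
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &frag_spec,
 *		.last = &frag_last,
 *		.mask = &frag_mask,
 *	};
 */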
2559
2560 /**
2561  * Validate IPV6 fragment extension item.
2562  *
2563  * @param[in] item
2564  *   Item specification.
2565  * @param[in] item_flags
2566  *   Bit-fields that hold the items detected until now.
2567  * @param[out] error
2568  *   Pointer to error structure.
2569  *
2570  * @return
2571  *   0 on success, a negative errno value otherwise and rte_errno is set.
2572  */
2573 static int
2574 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
2575                                     uint64_t item_flags,
2576                                     struct rte_flow_error *error)
2577 {
2578         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
2579         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
2580         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
2581         rte_be16_t frag_data_spec = 0;
2582         rte_be16_t frag_data_last = 0;
2583         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2584         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2585                                       MLX5_FLOW_LAYER_OUTER_L4;
2586         int ret = 0;
2587         struct rte_flow_item_ipv6_frag_ext nic_mask = {
2588                 .hdr = {
2589                         .next_header = 0xff,
2590                         .frag_data = RTE_BE16(0xffff),
2591                 },
2592         };
2593
2594         if (item_flags & l4m)
2595                 return rte_flow_error_set(error, EINVAL,
2596                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2597                                           "ipv6 fragment extension item cannot "
2598                                           "follow L4 item.");
2599         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
2600             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
2601                 return rte_flow_error_set(error, EINVAL,
2602                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2603                                           "ipv6 fragment extension item must "
2604                                           "follow ipv6 item");
2605         if (spec && mask)
2606                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
2607         if (!frag_data_spec)
2608                 return 0;
2609         /*
2610          * spec and mask are valid, enforce using full mask to make sure the
2611          * complete value is used correctly.
2612          */
2613         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2614                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2615                 return rte_flow_error_set(error, EINVAL,
2616                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2617                                           item, "must use full mask for"
2618                                           " frag_data");
2619         /*
2620          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0,
2621          * i.e. this is the 1st fragment of a fragmented packet.
2622          */
2623         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2624                 return rte_flow_error_set(error, ENOTSUP,
2625                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2626                                           "match on first fragment not "
2627                                           "supported");
2628         if (frag_data_spec && !last)
2629                 return rte_flow_error_set(error, EINVAL,
2630                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2631                                           "specified value not supported");
2632         ret = mlx5_flow_item_acceptable
2633                                 (item, (const uint8_t *)mask,
2634                                  (const uint8_t *)&nic_mask,
2635                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2636                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2637         if (ret)
2638                 return ret;
2639         /* spec and last are valid, validate the specified range. */
2640         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2641         /*
2642          * Match on frag_data spec 0x0009 and last 0xfff9
2643          * means M is 1 and frag-offset is > 0.
2644          * Such a packet is the 2nd or a later fragment, excluding the last.
2645          * This is not yet supported in MLX5, return appropriate
2646          * error message.
2647          */
2648         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2649                                        RTE_IPV6_EHDR_MF_MASK) &&
2650             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2651                 return rte_flow_error_set(error, ENOTSUP,
2652                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2653                                           last, "match on following "
2654                                           "fragments not supported");
2655         /*
2656          * Match on frag_data spec 0x0008 and last 0xfff8
2657          * means M is 0 and frag-offset is > 0.
2658          * Such a packet is the last fragment of a fragmented packet.
2659          * This is not yet supported in MLX5, return appropriate
2660          * error message.
2661          */
2662         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2663             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2664                 return rte_flow_error_set(error, ENOTSUP,
2665                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2666                                           last, "match on last "
2667                                           "fragment not supported");
2668         /* Other range values are invalid and rejected. */
2669         return rte_flow_error_set(error, EINVAL,
2670                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2671                                   "specified range not supported");
2672 }
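
/*
 * Editorial sketch, not part of the driver: the checks above reject any
 * match on a non-zero frag_data value or range, so "packet is an IPv6
 * fragment" is expressed by the mere presence of the item, with no
 * frag_data in the mask:
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */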
2673
2674 /**
2675  * Validate ASO CT item.
2676  *
2677  * @param[in] dev
2678  *   Pointer to the rte_eth_dev structure.
2679  * @param[in] item
2680  *   Item specification.
2681  * @param[in] item_flags
2682  *   Pointer to bit-fields that hold the items detected until now.
2683  * @param[out] error
2684  *   Pointer to error structure.
2685  *
2686  * @return
2687  *   0 on success, a negative errno value otherwise and rte_errno is set.
2688  */
2689 static int
2690 flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
2691                              const struct rte_flow_item *item,
2692                              uint64_t *item_flags,
2693                              struct rte_flow_error *error)
2694 {
2695         const struct rte_flow_item_conntrack *spec = item->spec;
2696         const struct rte_flow_item_conntrack *mask = item->mask;
2697         uint32_t flags;
2698         RTE_SET_USED(dev);
2699
2700         if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
2701                 return rte_flow_error_set(error, EINVAL,
2702                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2703                                           "Only one CT is supported");
2704         if (!mask)
2705                 mask = &rte_flow_item_conntrack_mask;
        /* The spec must be present to know which status bits to match on. */
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "CT item spec cannot be NULL");
2706         flags = spec->flags & mask->flags;
2707         if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
2708             ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
2709              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
2710              (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
2711                 return rte_flow_error_set(error, EINVAL,
2712                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2713                                           "Conflict status bits");
2714         /* State change also needs to be considered. */
2715         *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
2716         return 0;
2717 }
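
/*
 * Editorial sketch, not part of the driver: a conntrack item matching
 * packets recognized as part of a valid connection. With a NULL mask,
 * rte_flow_item_conntrack_mask applies; the flag is defined by rte_flow:
 *
 *	const struct rte_flow_item_conntrack ct_spec = {
 *		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
 *	};
 *	const struct rte_flow_item ct_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
 *		.spec = &ct_spec,
 *	};
 */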
2718
2719 /**
2720  * Validate the pop VLAN action.
2721  *
2722  * @param[in] dev
2723  *   Pointer to the rte_eth_dev structure.
2724  * @param[in] action_flags
2725  *   Holds the actions detected until now.
2726  * @param[in] action
2727  *   Pointer to the pop vlan action.
2728  * @param[in] item_flags
2729  *   The items found in this flow rule.
2730  * @param[in] attr
2731  *   Pointer to flow attributes.
2732  * @param[out] error
2733  *   Pointer to error structure.
2734  *
2735  * @return
2736  *   0 on success, a negative errno value otherwise and rte_errno is set.
2737  */
2738 static int
2739 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2740                                  uint64_t action_flags,
2741                                  const struct rte_flow_action *action,
2742                                  uint64_t item_flags,
2743                                  const struct rte_flow_attr *attr,
2744                                  struct rte_flow_error *error)
2745 {
2746         const struct mlx5_priv *priv = dev->data->dev_private;
2747
2750         if (!priv->sh->pop_vlan_action)
2751                 return rte_flow_error_set(error, ENOTSUP,
2752                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2753                                           NULL,
2754                                           "pop vlan action is not supported");
2755         if (attr->egress)
2756                 return rte_flow_error_set(error, ENOTSUP,
2757                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2758                                           NULL,
2759                                           "pop vlan action not supported for "
2760                                           "egress");
2761         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2762                 return rte_flow_error_set(error, ENOTSUP,
2763                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2764                                           "no support for multiple VLAN "
2765                                           "actions");
2766         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2767         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2768             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2769                 return rte_flow_error_set(error, ENOTSUP,
2770                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2771                                           NULL,
2772                                           "cannot pop vlan after decap without "
2773                                           "match on inner vlan in the flow");
2774         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2775         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2776             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2777                 return rte_flow_error_set(error, ENOTSUP,
2778                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2779                                           NULL,
2780                                           "cannot pop vlan without a "
2781                                           "match on (outer) vlan in the flow");
2782         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2783                 return rte_flow_error_set(error, EINVAL,
2784                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2785                                           "wrong action order, port_id should "
2786                                           "be after pop VLAN action");
2787         if (!attr->transfer && priv->representor)
2788                 return rte_flow_error_set(error, ENOTSUP,
2789                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2790                                           "pop vlan action for VF representor "
2791                                           "not supported on NIC table");
2792         return 0;
2793 }
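
/*
 * Editorial sketch, not part of the driver: the ordering rules above mean a
 * transfer rule popping a VLAN tag matches the tag explicitly and places
 * port_id after the pop, e.g.:
 *
 *	pattern: ETH / VLAN / ... / END
 *	actions: OF_POP_VLAN / PORT_ID / END
 */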
2794
2795 /**
2796  * Get VLAN default info from vlan match info.
2797  *
2798  * @param[in] items
2799  *   The list of item specifications.
2800  * @param[out] vlan
2801  *   Pointer to the VLAN info to fill in.
2805  */
2806 static void
2807 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2808                                   struct rte_vlan_hdr *vlan)
2809 {
2810         const struct rte_flow_item_vlan nic_mask = {
2811                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2812                                 MLX5DV_FLOW_VLAN_VID_MASK),
2813                 .inner_type = RTE_BE16(0xffff),
2814         };
2815
2816         if (items == NULL)
2817                 return;
2818         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2819                 int type = items->type;
2820
2821                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2822                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2823                         break;
2824         }
2825         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2826                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2827                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2828
2829                 /* If VLAN item in pattern doesn't contain data, return here. */
2830                 if (!vlan_v)
2831                         return;
2832                 if (!vlan_m)
2833                         vlan_m = &nic_mask;
2834                 /* Only full match values are accepted */
2835                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2836                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2837                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2838                         vlan->vlan_tci |=
2839                                 rte_be_to_cpu_16(vlan_v->tci &
2840                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2841                 }
2842                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2843                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2844                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2845                         vlan->vlan_tci |=
2846                                 rte_be_to_cpu_16(vlan_v->tci &
2847                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2848                 }
2849                 if (vlan_m->inner_type == nic_mask.inner_type)
2850                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2851                                                            vlan_m->inner_type);
2852         }
2853 }
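
/*
 * Editorial sketch, not part of the driver: the TCI decomposition used by
 * the helper above, for an illustrative TCI of 0xa07b:
 *
 *	PCP = (0xa07b & 0xe000) >> 13 = 5
 *	DEI = (0xa07b & 0x1000) >> 12 = 0	(not extracted here)
 *	VID =  0xa07b & 0x0fff       = 0x7b (123)
 *
 * Each field is copied only when its sub-mask is fully set in the VLAN
 * item mask.
 */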
2854
2855 /**
2856  * Validate the push VLAN action.
2857  *
2858  * @param[in] dev
2859  *   Pointer to the rte_eth_dev structure.
2860  * @param[in] action_flags
2861  *   Holds the actions detected until now.
2862  * @param[in] vlan_m
2863  *   Pointer to the VLAN item mask from the flow pattern, can be NULL.
2864  * @param[in] action
2865  *   Pointer to the action structure.
2866  * @param[in] attr
2867  *   Pointer to flow attributes
2868  * @param[out] error
2869  *   Pointer to error structure.
2870  *
2871  * @return
2872  *   0 on success, a negative errno value otherwise and rte_errno is set.
2873  */
2874 static int
2875 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2876                                   uint64_t action_flags,
2877                                   const struct rte_flow_item_vlan *vlan_m,
2878                                   const struct rte_flow_action *action,
2879                                   const struct rte_flow_attr *attr,
2880                                   struct rte_flow_error *error)
2881 {
2882         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2883         const struct mlx5_priv *priv = dev->data->dev_private;
2884
2885         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2886             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2887                 return rte_flow_error_set(error, EINVAL,
2888                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2889                                           "invalid vlan ethertype");
2890         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2891                 return rte_flow_error_set(error, EINVAL,
2892                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2893                                           "wrong action order, port_id should "
2894                                           "be after push VLAN");
2895         if (!attr->transfer && priv->representor)
2896                 return rte_flow_error_set(error, ENOTSUP,
2897                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2898                                           "push vlan action for VF representor "
2899                                           "not supported on NIC table");
2900         if (vlan_m &&
2901             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2902             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2903                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2904             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2905             !(mlx5_flow_find_action
2906                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2907                 return rte_flow_error_set(error, EINVAL,
2908                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2909                                           "not full match mask on VLAN PCP and "
2910                                           "there is no of_set_vlan_pcp action, "
2911                                           "push VLAN action cannot figure out "
2912                                           "PCP value");
2913         if (vlan_m &&
2914             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2915             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2916                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2917             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2918             !(mlx5_flow_find_action
2919                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2920                 return rte_flow_error_set(error, EINVAL,
2921                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2922                                           "not full match mask on VLAN VID and "
2923                                           "there is no of_set_vlan_vid action, "
2924                                           "push VLAN action cannot figure out "
2925                                           "VID value");
2927         return 0;
2928 }
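
/*
 * Editorial sketch, not part of the driver: when the pushed VLAN header
 * cannot be derived from a fully-masked VLAN item, the checks above require
 * explicit set actions after the push; all values are illustrative:
 *
 *	struct rte_flow_action_of_push_vlan push = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 *	struct rte_flow_action_of_set_vlan_vid vid = {
 *		.vlan_vid = RTE_BE16(100),
 *	};
 *	struct rte_flow_action_port_id port = { .id = 1 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &port },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */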
2929
2930 /**
2931  * Validate the set VLAN PCP.
2932  *
2933  * @param[in] action_flags
2934  *   Holds the actions detected until now.
2935  * @param[in] actions
2936  *   Pointer to the list of actions remaining in the flow rule.
2937  * @param[out] error
2938  *   Pointer to error structure.
2939  *
2940  * @return
2941  *   0 on success, a negative errno value otherwise and rte_errno is set.
2942  */
2943 static int
2944 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2945                                      const struct rte_flow_action actions[],
2946                                      struct rte_flow_error *error)
2947 {
2948         const struct rte_flow_action *action = actions;
2949         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2950
2951         if (conf->vlan_pcp > 7)
2952                 return rte_flow_error_set(error, EINVAL,
2953                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2954                                           "VLAN PCP value is too big");
2955         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2956                 return rte_flow_error_set(error, ENOTSUP,
2957                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2958                                           "set VLAN PCP action must follow "
2959                                           "the push VLAN action");
2960         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2961                 return rte_flow_error_set(error, ENOTSUP,
2962                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2963                                           "Multiple VLAN PCP modifications are "
2964                                           "not supported");
2965         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2966                 return rte_flow_error_set(error, EINVAL,
2967                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2968                                           "wrong action order, port_id should "
2969                                           "be after set VLAN PCP");
2970         return 0;
2971 }
2972
2973 /**
2974  * Validate the set VLAN VID.
2975  *
2976  * @param[in] item_flags
2977  *   Holds the items detected in this rule.
2978  * @param[in] action_flags
2979  *   Holds the actions detected until now.
2980  * @param[in] actions
2981  *   Pointer to the list of actions remaining in the flow rule.
2982  * @param[out] error
2983  *   Pointer to error structure.
2984  *
2985  * @return
2986  *   0 on success, a negative errno value otherwise and rte_errno is set.
2987  */
2988 static int
2989 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2990                                      uint64_t action_flags,
2991                                      const struct rte_flow_action actions[],
2992                                      struct rte_flow_error *error)
2993 {
2994         const struct rte_flow_action *action = actions;
2995         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2996
2997         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2998                 return rte_flow_error_set(error, EINVAL,
2999                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3000                                           "VLAN VID value is too big");
3001         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
3002             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
3003                 return rte_flow_error_set(error, ENOTSUP,
3004                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3005                                           "set VLAN VID action must follow push"
3006                                           " VLAN action or match on VLAN item");
3007         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
3008                 return rte_flow_error_set(error, ENOTSUP,
3009                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3010                                           "Multiple VLAN VID modifications are "
3011                                           "not supported");
3012         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
3013                 return rte_flow_error_set(error, EINVAL,
3014                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3015                                           "wrong action order, port_id should "
3016                                           "be after set VLAN VID");
3017         return 0;
3018 }
3019
3020 /**
3021  * Validate the FLAG action.
3022  *
3023  * @param[in] dev
3024  *   Pointer to the rte_eth_dev structure.
3025  * @param[in] action_flags
3026  *   Holds the actions detected until now.
3027  * @param[in] attr
3028  *   Pointer to flow attributes
3029  * @param[out] error
3030  *   Pointer to error structure.
3031  *
3032  * @return
3033  *   0 on success, a negative errno value otherwise and rte_errno is set.
3034  */
3035 static int
3036 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
3037                              uint64_t action_flags,
3038                              const struct rte_flow_attr *attr,
3039                              struct rte_flow_error *error)
3040 {
3041         struct mlx5_priv *priv = dev->data->dev_private;
3042         struct mlx5_dev_config *config = &priv->config;
3043         int ret;
3044
3045         /* Fall back if no extended metadata register support. */
3046         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3047                 return mlx5_flow_validate_action_flag(action_flags, attr,
3048                                                       error);
3049         /* Extensive metadata mode requires registers. */
3050         if (!mlx5_flow_ext_mreg_supported(dev))
3051                 return rte_flow_error_set(error, ENOTSUP,
3052                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3053                                           "no metadata registers "
3054                                           "to support flag action");
3055         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
3056                 return rte_flow_error_set(error, ENOTSUP,
3057                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3058                                           "extended metadata register"
3059                                           " isn't available");
3060         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3061         if (ret < 0)
3062                 return ret;
3063         MLX5_ASSERT(ret > 0);
3064         if (action_flags & MLX5_FLOW_ACTION_MARK)
3065                 return rte_flow_error_set(error, EINVAL,
3066                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3067                                           "can't mark and flag in same flow");
3068         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3069                 return rte_flow_error_set(error, EINVAL,
3070                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3071                                           "can't have 2 flag"
3072                                           " actions in same flow");
3073         return 0;
3074 }
3075
3076 /**
3077  * Validate MARK action.
3078  *
3079  * @param[in] dev
3080  *   Pointer to the rte_eth_dev structure.
3081  * @param[in] action
3082  *   Pointer to action.
3083  * @param[in] action_flags
3084  *   Holds the actions detected until now.
3085  * @param[in] attr
3086  *   Pointer to flow attributes
3087  * @param[out] error
3088  *   Pointer to error structure.
3089  *
3090  * @return
3091  *   0 on success, a negative errno value otherwise and rte_errno is set.
3092  */
3093 static int
3094 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
3095                              const struct rte_flow_action *action,
3096                              uint64_t action_flags,
3097                              const struct rte_flow_attr *attr,
3098                              struct rte_flow_error *error)
3099 {
3100         struct mlx5_priv *priv = dev->data->dev_private;
3101         struct mlx5_dev_config *config = &priv->config;
3102         const struct rte_flow_action_mark *mark = action->conf;
3103         int ret;
3104
3105         if (is_tunnel_offload_active(dev))
3106                 return rte_flow_error_set(error, ENOTSUP,
3107                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3108                                           "no mark action "
3109                                           "if tunnel offload active");
3110         /* Fall back if no extended metadata register support. */
3111         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
3112                 return mlx5_flow_validate_action_mark(action, action_flags,
3113                                                       attr, error);
3114         /* Extensive metadata mode requires registers. */
3115         if (!mlx5_flow_ext_mreg_supported(dev))
3116                 return rte_flow_error_set(error, ENOTSUP,
3117                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3118                                           "no metadata registers "
3119                                           "to support mark action");
3120         if (!priv->sh->dv_mark_mask)
3121                 return rte_flow_error_set(error, ENOTSUP,
3122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3123                                           "extended metadata register"
3124                                           " isn't available");
3125         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3126         if (ret < 0)
3127                 return ret;
3128         MLX5_ASSERT(ret > 0);
3129         if (!mark)
3130                 return rte_flow_error_set(error, EINVAL,
3131                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3132                                           "configuration cannot be null");
3133         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
3134                 return rte_flow_error_set(error, EINVAL,
3135                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3136                                           &mark->id,
3137                                           "mark id exceeds the limit");
3138         if (action_flags & MLX5_FLOW_ACTION_FLAG)
3139                 return rte_flow_error_set(error, EINVAL,
3140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3141                                           "can't flag and mark in same flow");
3142         if (action_flags & MLX5_FLOW_ACTION_MARK)
3143                 return rte_flow_error_set(error, EINVAL,
3144                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3145                                           "can't have 2 mark actions in same"
3146                                           " flow");
3147         return 0;
3148 }
3149
3150 /**
3151  * Validate SET_META action.
3152  *
3153  * @param[in] dev
3154  *   Pointer to the rte_eth_dev structure.
3155  * @param[in] action
3156  *   Pointer to the action structure.
3157  * @param[in] action_flags
3158  *   Holds the actions detected until now.
3159  * @param[in] attr
3160  *   Pointer to flow attributes
3161  * @param[out] error
3162  *   Pointer to error structure.
3163  *
3164  * @return
3165  *   0 on success, a negative errno value otherwise and rte_errno is set.
3166  */
3167 static int
3168 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
3169                                  const struct rte_flow_action *action,
3170                                  uint64_t action_flags __rte_unused,
3171                                  const struct rte_flow_attr *attr,
3172                                  struct rte_flow_error *error)
3173 {
3174         const struct rte_flow_action_set_meta *conf;
3175         uint32_t nic_mask = UINT32_MAX;
3176         int reg;
3177
3178         if (!mlx5_flow_ext_mreg_supported(dev))
3179                 return rte_flow_error_set(error, ENOTSUP,
3180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3181                                           "extended metadata register"
3182                                           " isn't supported");
3183         reg = flow_dv_get_metadata_reg(dev, attr, error);
3184         if (reg < 0)
3185                 return reg;
3186         if (reg == REG_NON)
3187                 return rte_flow_error_set(error, ENOTSUP,
3188                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3189                                           "unavailable extended metadata register");
3190         if (reg != REG_A && reg != REG_B) {
3191                 struct mlx5_priv *priv = dev->data->dev_private;
3192
3193                 nic_mask = priv->sh->dv_meta_mask;
3194         }
3195         if (!(action->conf))
3196                 return rte_flow_error_set(error, EINVAL,
3197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3198                                           "configuration cannot be null");
3199         conf = (const struct rte_flow_action_set_meta *)action->conf;
3200         if (!conf->mask)
3201                 return rte_flow_error_set(error, EINVAL,
3202                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3203                                           "zero mask doesn't have any effect");
3204         if (conf->mask & ~nic_mask)
3205                 return rte_flow_error_set(error, EINVAL,
3206                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3207                                           "metadata must be within reg C0");
3208         return 0;
3209 }
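
/*
 * Editorial sketch, not part of the driver: a SET_META action with an
 * explicit non-zero mask; the mask must also fit the register selected by
 * flow_dv_get_metadata_reg(). Values are illustrative:
 *
 *	const struct rte_flow_action_set_meta meta = {
 *		.data = 0x1234,
 *		.mask = 0xffff,
 *	};
 */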
3210
3211 /**
3212  * Validate SET_TAG action.
3213  *
3214  * @param[in] dev
3215  *   Pointer to the rte_eth_dev structure.
3216  * @param[in] action
3217  *   Pointer to the action structure.
3218  * @param[in] action_flags
3219  *   Holds the actions detected until now.
3220  * @param[in] attr
3221  *   Pointer to flow attributes
3222  * @param[out] error
3223  *   Pointer to error structure.
3224  *
3225  * @return
3226  *   0 on success, a negative errno value otherwise and rte_errno is set.
3227  */
3228 static int
3229 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
3230                                 const struct rte_flow_action *action,
3231                                 uint64_t action_flags,
3232                                 const struct rte_flow_attr *attr,
3233                                 struct rte_flow_error *error)
3234 {
3235         const struct rte_flow_action_set_tag *conf;
3236         const uint64_t terminal_action_flags =
3237                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
3238                 MLX5_FLOW_ACTION_RSS;
3239         int ret;
3240
3241         if (!mlx5_flow_ext_mreg_supported(dev))
3242                 return rte_flow_error_set(error, ENOTSUP,
3243                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3244                                           "extended metadata register"
3245                                           " isn't supported");
3246         if (!(action->conf))
3247                 return rte_flow_error_set(error, EINVAL,
3248                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3249                                           "configuration cannot be null");
3250         conf = (const struct rte_flow_action_set_tag *)action->conf;
3251         if (!conf->mask)
3252                 return rte_flow_error_set(error, EINVAL,
3253                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3254                                           "zero mask doesn't have any effect");
3255         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
3256         if (ret < 0)
3257                 return ret;
3258         if (!attr->transfer && attr->ingress &&
3259             (action_flags & terminal_action_flags))
3260                 return rte_flow_error_set(error, EINVAL,
3261                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3262                                           "set_tag has no effect"
3263                                           " with terminal actions");
3264         return 0;
3265 }
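
/*
 * Editorial sketch, not part of the driver: a SET_TAG action writing to
 * application tag register index 0 with a non-zero mask, as required above;
 * values are illustrative:
 *
 *	const struct rte_flow_action_set_tag tag = {
 *		.index = 0,
 *		.data = 0xcafe,
 *		.mask = 0xffff,
 *	};
 */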
3266
3267 /**
3268  * Check if action counter is shared by either old or new mechanism.
3269  *
3270  * @param[in] action
3271  *   Pointer to the action structure.
3272  *
3273  * @return
3274  *   True when counter is shared, false otherwise.
3275  */
3276 static inline bool
3277 is_shared_action_count(const struct rte_flow_action *action)
3278 {
3279         const struct rte_flow_action_count *count =
3280                         (const struct rte_flow_action_count *)action->conf;
3281
3282         if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
3283                 return true;
3284         return !!(count && count->shared);
3285 }
3286
3287 /**
3288  * Validate count action.
3289  *
3290  * @param[in] dev
3291  *   Pointer to rte_eth_dev structure.
3292  * @param[in] shared
3293  *   Indicator if action is shared.
3294  * @param[in] action_flags
3295  *   Holds the actions detected until now.
3296  * @param[out] error
3297  *   Pointer to error structure.
3298  *
3299  * @return
3300  *   0 on success, a negative errno value otherwise and rte_errno is set.
3301  */
3302 static int
3303 flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
3304                               uint64_t action_flags,
3305                               struct rte_flow_error *error)
3306 {
3307         struct mlx5_priv *priv = dev->data->dev_private;
3308
3309         if (!priv->config.devx)
3310                 goto notsup_err;
3311         if (action_flags & MLX5_FLOW_ACTION_COUNT)
3312                 return rte_flow_error_set(error, EINVAL,
3313                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3314                                           "duplicate count actions set");
3315         if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
3316             !priv->sh->flow_hit_aso_en)
3317                 return rte_flow_error_set(error, EINVAL,
3318                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3319                                           "old age and shared count combination is not supported");
3320 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
3321         return 0;
3322 #endif
3323 notsup_err:
3324         return rte_flow_error_set
3325                       (error, ENOTSUP,
3326                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3327                        NULL,
3328                        "count action not supported");
3329 }
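
/*
 * Editorial sketch, not part of the driver: a shared counter using the
 * legacy per-action sharing flag that is_shared_action_count() checks; the
 * id is illustrative:
 *
 *	const struct rte_flow_action_count cnt = {
 *		.shared = 1,
 *		.id = 42,
 *	};
 */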
3330
3331 /**
3332  * Validate the L2 encap action.
3333  *
3334  * @param[in] dev
3335  *   Pointer to the rte_eth_dev structure.
3336  * @param[in] action_flags
3337  *   Holds the actions detected until now.
3338  * @param[in] action
3339  *   Pointer to the action structure.
3340  * @param[in] attr
3341  *   Pointer to flow attributes.
3342  * @param[out] error
3343  *   Pointer to error structure.
3344  *
3345  * @return
3346  *   0 on success, a negative errno value otherwise and rte_errno is set.
3347  */
3348 static int
3349 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
3350                                  uint64_t action_flags,
3351                                  const struct rte_flow_action *action,
3352                                  const struct rte_flow_attr *attr,
3353                                  struct rte_flow_error *error)
3354 {
3355         const struct mlx5_priv *priv = dev->data->dev_private;
3356
3357         if (!(action->conf))
3358                 return rte_flow_error_set(error, EINVAL,
3359                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3360                                           "configuration cannot be null");
3361         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3362                 return rte_flow_error_set(error, EINVAL,
3363                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3364                                           "can only have a single encap action "
3365                                           "in a flow");
3366         if (!attr->transfer && priv->representor)
3367                 return rte_flow_error_set(error, ENOTSUP,
3368                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3369                                           "encap action for VF representor "
3370                                           "not supported on NIC table");
3371         return 0;
3372 }
3373
3374 /**
3375  * Validate a decap action.
3376  *
3377  * @param[in] dev
3378  *   Pointer to the rte_eth_dev structure.
3379  * @param[in] action_flags
3380  *   Holds the actions detected until now.
3381  * @param[in] action
3382  *   Pointer to the action structure.
3383  * @param[in] item_flags
3384  *   Holds the items detected.
3385  * @param[in] attr
3386  *   Pointer to flow attributes
3387  * @param[out] error
3388  *   Pointer to error structure.
3389  *
3390  * @return
3391  *   0 on success, a negative errno value otherwise and rte_errno is set.
3392  */
3393 static int
3394 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
3395                               uint64_t action_flags,
3396                               const struct rte_flow_action *action,
3397                               const uint64_t item_flags,
3398                               const struct rte_flow_attr *attr,
3399                               struct rte_flow_error *error)
3400 {
3401         const struct mlx5_priv *priv = dev->data->dev_private;
3402
3403         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
3404             !priv->config.decap_en)
3405                 return rte_flow_error_set(error, ENOTSUP,
3406                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3407                                           "decap is not enabled");
3408         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
3409                 return rte_flow_error_set(error, ENOTSUP,
3410                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3411                                           action_flags &
3412                                           MLX5_FLOW_ACTION_DECAP ? "can only "
3413                                           "have a single decap action" : "decap "
3414                                           "after encap is not supported");
3415         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
3416                 return rte_flow_error_set(error, EINVAL,
3417                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3418                                           "can't have decap action after"
3419                                           " modify action");
3420         if (attr->egress)
3421                 return rte_flow_error_set(error, ENOTSUP,
3422                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
3423                                           NULL,
3424                                           "decap action not supported for "
3425                                           "egress");
3426         if (!attr->transfer && priv->representor)
3427                 return rte_flow_error_set(error, ENOTSUP,
3428                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3429                                           "decap action for VF representor "
3430                                           "not supported on NIC table");
3431         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
3432             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
3433                 return rte_flow_error_set(error, ENOTSUP,
3434                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3435                                 "VXLAN item should be present for VXLAN decap");
3436         return 0;
3437 }
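
/*
 * Editorial sketch, not part of the driver: per the last check above, a
 * VXLAN decap rule must match the VXLAN layer it removes, e.g.:
 *
 *	pattern: ETH / IPV4 / UDP / VXLAN / END
 *	actions: VXLAN_DECAP / ... / END
 */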
3438
3439 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
3440
3441 /**
3442  * Validate the raw encap and decap actions.
3443  *
3444  * @param[in] dev
3445  *   Pointer to the rte_eth_dev structure.
3446  * @param[in] decap
3447  *   Pointer to the decap action.
3448  * @param[in] encap
3449  *   Pointer to the encap action.
3450  * @param[in] attr
3451  *   Pointer to flow attributes
3452  * @param[in, out] action_flags
3453  *   Holds the actions detected until now.
3454  * @param[out] actions_n
3455  *   Pointer to the actions counter.
3456  * @param[in] action
3457  *   Pointer to the action structure.
3458  * @param[in] item_flags
3459  *   Holds the items detected.
3460  * @param[out] error
3461  *   Pointer to error structure.
3462  *
3463  * @return
3464  *   0 on success, a negative errno value otherwise and rte_errno is set.
3465  */
3466 static int
3467 flow_dv_validate_action_raw_encap_decap
3468         (struct rte_eth_dev *dev,
3469          const struct rte_flow_action_raw_decap *decap,
3470          const struct rte_flow_action_raw_encap *encap,
3471          const struct rte_flow_attr *attr, uint64_t *action_flags,
3472          int *actions_n, const struct rte_flow_action *action,
3473          uint64_t item_flags, struct rte_flow_error *error)
3474 {
3475         const struct mlx5_priv *priv = dev->data->dev_private;
3476         int ret;
3477
3478         if (encap && (!encap->size || !encap->data))
3479                 return rte_flow_error_set(error, EINVAL,
3480                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3481                                           "raw encap data cannot be empty");
3482         if (decap && encap) {
3483                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
3484                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3485                         /* L3 encap. */
3486                         decap = NULL;
3487                 else if (encap->size <=
3488                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3489                            decap->size >
3490                            MLX5_ENCAPSULATION_DECISION_SIZE)
3491                         /* L3 decap. */
3492                         encap = NULL;
3493                 else if (encap->size >
3494                            MLX5_ENCAPSULATION_DECISION_SIZE &&
3495                            decap->size >
3496                            MLX5_ENCAPSULATION_DECISION_SIZE)
3497                         /* 2 L2 actions: encap and decap. */
3498                         ;
3499                 else
3500                         return rte_flow_error_set(error,
3501                                 ENOTSUP,
3502                                 RTE_FLOW_ERROR_TYPE_ACTION,
3503                                 NULL, "unsupported combination: both raw "
3504                                 "decap and raw encap sizes too small");
3506         }
3507         if (decap) {
3508                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
3509                                                     item_flags, attr, error);
3510                 if (ret < 0)
3511                         return ret;
3512                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
3513                 ++(*actions_n);
3514         }
3515         if (encap) {
3516                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
3517                         return rte_flow_error_set(error, ENOTSUP,
3518                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3519                                                   NULL,
3520                                                   "small raw encap size");
3521                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
3522                         return rte_flow_error_set(error, EINVAL,
3523                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3524                                                   NULL,
3525                                                   "more than one encap action");
3526                 if (!attr->transfer && priv->representor)
3527                         return rte_flow_error_set
3528                                         (error, ENOTSUP,
3529                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3530                                          "encap action for VF representor "
3531                                          "not supported on NIC table");
3532                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
3533                 ++(*actions_n);
3534         }
3535         return 0;
3536 }
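
/*
 * Editorial sketch, not part of the driver: how the size checks above
 * classify a raw decap + raw encap pair. The buffers are hypothetical;
 * only their sizes relative to MLX5_ENCAPSULATION_DECISION_SIZE matter:
 *
 *	decap size <= decision size, encap size > decision size
 *		-> single L3 encap reformat (the decap is absorbed);
 *	decap size > decision size, encap size <= decision size
 *		-> single L3 decap reformat (the encap is absorbed);
 *	both sizes > decision size
 *		-> two L2 actions, decap followed by encap.
 */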
3537
3538 /**
3539  * Validate the ASO CT action.
3540  *
3541  * @param[in] dev
3542  *   Pointer to the rte_eth_dev structure.
3543  * @param[in] action_flags
3544  *   Holds the actions detected until now.
3545  * @param[in] item_flags
3546  *   The items found in this flow rule.
3547  * @param[in] attr
3548  *   Pointer to flow attributes.
3549  * @param[out] error
3550  *   Pointer to error structure.
3551  *
3552  * @return
3553  *   0 on success, a negative errno value otherwise and rte_errno is set.
3554  */
3555 static int
3556 flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
3557                                uint64_t action_flags,
3558                                uint64_t item_flags,
3559                                const struct rte_flow_attr *attr,
3560                                struct rte_flow_error *error)
3561 {
3562         RTE_SET_USED(dev);
3563
3564         if (attr->group == 0 && !attr->transfer)
3565                 return rte_flow_error_set(error, ENOTSUP,
3566                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3567                                           NULL,
3568                                           "Only support non-root table");
3569         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
3570                 return rte_flow_error_set(error, ENOTSUP,
3571                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3572                                           "CT cannot follow a fate action");
3573         if ((action_flags & MLX5_FLOW_ACTION_METER) ||
3574             (action_flags & MLX5_FLOW_ACTION_AGE))
3575                 return rte_flow_error_set(error, EINVAL,
3576                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3577                                           "Only one ASO action is supported");
3578         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3579                 return rte_flow_error_set(error, EINVAL,
3580                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3581                                           "Encap cannot exist before CT");
3582         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3583                 return rte_flow_error_set(error, EINVAL,
3584                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3585                                           "Not an outer TCP packet");
3586         return 0;
3587 }
3588
3589 /**
3590  * Match encap_decap resource.
3591  *
3592  * @param list
3593  *   Pointer to the hash list.
3594  * @param entry
3595  *   Pointer to the existing resource entry object.
3596  * @param key
3597  *   Key of the new entry.
3598  * @param cb_ctx
3599  *   Pointer to the context with the new encap_decap resource.
3600  *
3601  * @return
3602  *   0 on match, non-zero otherwise.
3603  */
3604 int
3605 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
3606                              struct mlx5_hlist_entry *entry,
3607                              uint64_t key __rte_unused, void *cb_ctx)
3608 {
3609         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3610         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3611         struct mlx5_flow_dv_encap_decap_resource *resource;
3612
3613         resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
3614                                 entry);
3615         if (resource->reformat_type == ctx_resource->reformat_type &&
3616             resource->ft_type == ctx_resource->ft_type &&
3617             resource->flags == ctx_resource->flags &&
3618             resource->size == ctx_resource->size &&
3619             !memcmp((const void *)resource->buf,
3620                     (const void *)ctx_resource->buf,
3621                     resource->size))
3622                 return 0;
3623         return -1;
3624 }
3625
3626 /**
3627  * Allocate encap_decap resource.
3628  *
3629  * @param list
3630  *   Pointer to the hash list.
3631  * @param key
3632  *   Key of the new entry, unused here.
3633  * @param cb_ctx
3634  *   Pointer to the context with the new encap_decap resource.
3635  *
3636  * @return
3637  *   Pointer to the new entry on success, NULL otherwise and rte_errno is set.
3638  */
3639 struct mlx5_hlist_entry *
3640 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
3641                               uint64_t key __rte_unused,
3642                               void *cb_ctx)
3643 {
3644         struct mlx5_dev_ctx_shared *sh = list->ctx;
3645         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3646         struct mlx5dv_dr_domain *domain;
3647         struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
3648         struct mlx5_flow_dv_encap_decap_resource *resource;
3649         uint32_t idx;
3650         int ret;
3651
3652         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3653                 domain = sh->fdb_domain;
3654         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3655                 domain = sh->rx_domain;
3656         else
3657                 domain = sh->tx_domain;
3658         /* Register new encap/decap resource. */
3659         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
3660         if (!resource) {
3661                 rte_flow_error_set(ctx->error, ENOMEM,
3662                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3663                                    "cannot allocate resource memory");
3664                 return NULL;
3665         }
3666         *resource = *ctx_resource;
3667         resource->idx = idx;
3668         ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
3669                                                               resource,
3670                                                              &resource->action);
3671         if (ret) {
3672                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
3673                 rte_flow_error_set(ctx->error, ENOMEM,
3674                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3675                                    NULL, "cannot create action");
3676                 return NULL;
3677         }
3678
3679         return &resource->entry;
3680 }
3681
3682 /**
3683  * Find existing encap/decap resource or create and register a new one.
3684  *
3685  * @param[in, out] dev
3686  *   Pointer to rte_eth_dev structure.
3687  * @param[in, out] resource
3688  *   Pointer to encap/decap resource.
3689  * @param[in, out] dev_flow
3690  *   Pointer to the dev_flow.
3691  * @param[out] error
3692  *   Pointer to error structure.
3693  *
3694  * @return
3695  *   0 on success, otherwise -errno and rte_errno is set.
3696  */
3697 static int
3698 flow_dv_encap_decap_resource_register
3699                         (struct rte_eth_dev *dev,
3700                          struct mlx5_flow_dv_encap_decap_resource *resource,
3701                          struct mlx5_flow *dev_flow,
3702                          struct rte_flow_error *error)
3703 {
3704         struct mlx5_priv *priv = dev->data->dev_private;
3705         struct mlx5_dev_ctx_shared *sh = priv->sh;
3706         struct mlx5_hlist_entry *entry;
3707         union {
3708                 struct {
3709                         uint32_t ft_type:8;
3710                         uint32_t refmt_type:8;
3711                         /*
3712                          * Header reformat actions can be shared between
3713                          * non-root tables. One bit to indicate non-root
3714                          * table or not.
3715                          */
3716                         uint32_t is_root:1;
3717                         uint32_t reserve:15;
3718                 };
3719                 uint32_t v32;
3720         } encap_decap_key = {
3721                 {
3722                         .ft_type = resource->ft_type,
3723                         .refmt_type = resource->reformat_type,
3724                         .is_root = !!dev_flow->dv.group,
3725                         .reserve = 0,
3726                 }
3727         };
3728         struct mlx5_flow_cb_ctx ctx = {
3729                 .error = error,
3730                 .data = resource,
3731         };
3732         uint64_t key64;
3733
3734         resource->flags = dev_flow->dv.group ? 0 : 1;
3735         key64 = __rte_raw_cksum(&encap_decap_key.v32,
3736                                 sizeof(encap_decap_key.v32), 0);
3737         if (resource->reformat_type !=
3738             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
3739             resource->size)
3740                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
3741         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
3742         if (!entry)
3743                 return -rte_errno;
3744         resource = container_of(entry, typeof(*resource), entry);
3745         dev_flow->dv.encap_decap = resource;
3746         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3747         return 0;
3748 }
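
/*
 * Worked key example (values hypothetical): ft_type = NIC_TX,
 * refmt_type = L2_TO_L2_TUNNEL and is_root = 0 are packed into the
 * 32-bit v32 above and folded by __rte_raw_cksum() into key64; for any
 * reformat other than plain L2 decap the raw header buffer is chained
 * into the same checksum, so two flows encapsulating identical headers
 * resolve to one shared hardware action:
 *
 *   key64 = __rte_raw_cksum(&encap_decap_key.v32,
 *                           sizeof(encap_decap_key.v32), 0);
 *   key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
 */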
3749
3750 /**
3751  * Find existing table jump resource or create and register a new one.
3752  *
3753  * @param[in, out] dev
3754  *   Pointer to rte_eth_dev structure.
3755  * @param[in, out] tbl
3756  *   Pointer to flow table resource.
3757  * @param[in, out] dev_flow
3758  *   Pointer to the dev_flow.
3759  * @param[out] error
3760  *   Pointer to error structure.
3761  *
3762  * @return
3763  *   0 on success, otherwise -errno and rte_errno is set.
3764  */
3765 static int
3766 flow_dv_jump_tbl_resource_register
3767                         (struct rte_eth_dev *dev __rte_unused,
3768                          struct mlx5_flow_tbl_resource *tbl,
3769                          struct mlx5_flow *dev_flow,
3770                          struct rte_flow_error *error __rte_unused)
3771 {
3772         struct mlx5_flow_tbl_data_entry *tbl_data =
3773                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3774
3775         MLX5_ASSERT(tbl);
3776         MLX5_ASSERT(tbl_data->jump.action);
3777         dev_flow->handle->rix_jump = tbl_data->idx;
3778         dev_flow->dv.jump = &tbl_data->jump;
3779         return 0;
3780 }
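
/*
 * No hardware object is allocated here: the jump action is expected to
 * have been created together with the table entry (hence the assertion
 * above), so registration only records the table index and borrows the
 * table's action.
 */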
3781
3782 int
3783 flow_dv_port_id_match_cb(struct mlx5_list *list __rte_unused,
3784                          struct mlx5_list_entry *entry, void *cb_ctx)
3785 {
3786         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3787         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3788         struct mlx5_flow_dv_port_id_action_resource *res =
3789                         container_of(entry, typeof(*res), entry);
3790
3791         return ref->port_id != res->port_id;
3792 }
3793
3794 struct mlx5_list_entry *
3795 flow_dv_port_id_create_cb(struct mlx5_list *list,
3796                           struct mlx5_list_entry *entry __rte_unused,
3797                           void *cb_ctx)
3798 {
3799         struct mlx5_dev_ctx_shared *sh = list->ctx;
3800         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3801         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3802         struct mlx5_flow_dv_port_id_action_resource *resource;
3803         uint32_t idx;
3804         int ret;
3805
3806         /* Register new port id action resource. */
3807         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3808         if (!resource) {
3809                 rte_flow_error_set(ctx->error, ENOMEM,
3810                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3811                                    "cannot allocate port_id action memory");
3812                 return NULL;
3813         }
3814         *resource = *ref;
3815         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3816                                                         ref->port_id,
3817                                                         &resource->action);
3818         if (ret) {
3819                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3820                 rte_flow_error_set(ctx->error, ENOMEM,
3821                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3822                                    "cannot create action");
3823                 return NULL;
3824         }
3825         resource->idx = idx;
3826         return &resource->entry;
3827 }
3828
3829 struct mlx5_list_entry *
3830 flow_dv_port_id_clone_cb(struct mlx5_list *list,
3831                           struct mlx5_list_entry *entry __rte_unused,
3832                           void *cb_ctx)
3833 {
3834         struct mlx5_dev_ctx_shared *sh = list->ctx;
3835         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3836         struct mlx5_flow_dv_port_id_action_resource *resource;
3837         uint32_t idx;
3838
3839         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3840         if (!resource) {
3841                 rte_flow_error_set(ctx->error, ENOMEM,
3842                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3843                                    "cannot allocate port_id action memory");
3844                 return NULL;
3845         }
3846         memcpy(resource, entry, sizeof(*resource));
3847         resource->idx = idx;
3848         return &resource->entry;
3849 }
3850
3851 void
3852 flow_dv_port_id_clone_free_cb(struct mlx5_list *list,
3853                           struct mlx5_list_entry *entry)
3854 {
3855         struct mlx5_dev_ctx_shared *sh = list->ctx;
3856         struct mlx5_flow_dv_port_id_action_resource *resource =
3857                         container_of(entry, typeof(*resource), entry);
3858
3859         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
3860 }
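
/*
 * The create/match/clone/clone_free callbacks above implement the
 * mlx5_list per-lcore cache contract: create_cb builds the DR action
 * once globally, clone_cb duplicates only the small descriptor into the
 * calling lcore's cache and clone_free_cb returns that copy to the
 * indexed pool. A sketch of wiring such a list (argument order
 * abbreviated, see mlx5_list_create() for the exact signature):
 *
 *   mlx5_list_create(&sh->port_id_action_list, "port_id_action", sh,
 *                    flow_dv_port_id_create_cb,
 *                    flow_dv_port_id_match_cb,
 *                    flow_dv_port_id_remove_cb,
 *                    flow_dv_port_id_clone_cb,
 *                    flow_dv_port_id_clone_free_cb);
 */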
3861
3862 /**
3863  * Find existing port ID action resource or create and register a new one.
3864  *
3865  * @param[in, out] dev
3866  *   Pointer to rte_eth_dev structure.
3867  * @param[in, out] ref
3868  *   Pointer to port ID action resource reference.
3869  * @param[in, out] dev_flow
3870  *   Pointer to the dev_flow.
3871  * @param[out] error
3872  *   Pointer to error structure.
3873  *
3874  * @return
3875  *   0 on success, otherwise -errno and rte_errno is set.
3876  */
3877 static int
3878 flow_dv_port_id_action_resource_register
3879                         (struct rte_eth_dev *dev,
3880                          struct mlx5_flow_dv_port_id_action_resource *ref,
3881                          struct mlx5_flow *dev_flow,
3882                          struct rte_flow_error *error)
3883 {
3884         struct mlx5_priv *priv = dev->data->dev_private;
3885         struct mlx5_list_entry *entry;
3886         struct mlx5_flow_dv_port_id_action_resource *resource;
3887         struct mlx5_flow_cb_ctx ctx = {
3888                 .error = error,
3889                 .data = ref,
3890         };
3891
3892         entry = mlx5_list_register(&priv->sh->port_id_action_list, &ctx);
3893         if (!entry)
3894                 return -rte_errno;
3895         resource = container_of(entry, typeof(*resource), entry);
3896         dev_flow->dv.port_id_action = resource;
3897         dev_flow->handle->rix_port_id_action = resource->idx;
3898         return 0;
3899 }
3900
3901 int
3902 flow_dv_push_vlan_match_cb(struct mlx5_list *list __rte_unused,
3903                          struct mlx5_list_entry *entry, void *cb_ctx)
3904 {
3905         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3906         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3907         struct mlx5_flow_dv_push_vlan_action_resource *res =
3908                         container_of(entry, typeof(*res), entry);
3909
3910         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3911 }
3912
3913 struct mlx5_list_entry *
3914 flow_dv_push_vlan_create_cb(struct mlx5_list *list,
3915                           struct mlx5_list_entry *entry __rte_unused,
3916                           void *cb_ctx)
3917 {
3918         struct mlx5_dev_ctx_shared *sh = list->ctx;
3919         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3920         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3921         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3922         struct mlx5dv_dr_domain *domain;
3923         uint32_t idx;
3924         int ret;
3925
3926         /* Register new push VLAN action resource. */
3927         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3928         if (!resource) {
3929                 rte_flow_error_set(ctx->error, ENOMEM,
3930                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3931                                    "cannot allocate push_vlan action memory");
3932                 return NULL;
3933         }
3934         *resource = *ref;
3935         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3936                 domain = sh->fdb_domain;
3937         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3938                 domain = sh->rx_domain;
3939         else
3940                 domain = sh->tx_domain;
3941         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3942                                                         &resource->action);
3943         if (ret) {
3944                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3945                 rte_flow_error_set(ctx->error, ENOMEM,
3946                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3947                                    "cannot create push vlan action");
3948                 return NULL;
3949         }
3950         resource->idx = idx;
3951         return &resource->entry;
3952 }
3953
3954 struct mlx5_list_entry *
3955 flow_dv_push_vlan_clone_cb(struct mlx5_list *list,
3956                           struct mlx5_list_entry *entry __rte_unused,
3957                           void *cb_ctx)
3958 {
3959         struct mlx5_dev_ctx_shared *sh = list->ctx;
3960         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3961         struct mlx5_flow_dv_push_vlan_action_resource *resource;
3962         uint32_t idx;
3963
3964         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3965         if (!resource) {
3966                 rte_flow_error_set(ctx->error, ENOMEM,
3967                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3968                                    "cannot allocate push_vlan action memory");
3969                 return NULL;
3970         }
3971         memcpy(resource, entry, sizeof(*resource));
3972         resource->idx = idx;
3973         return &resource->entry;
3974 }
3975
3976 void
3977 flow_dv_push_vlan_clone_free_cb(struct mlx5_list *list,
3978                             struct mlx5_list_entry *entry)
3979 {
3980         struct mlx5_dev_ctx_shared *sh = list->ctx;
3981         struct mlx5_flow_dv_push_vlan_action_resource *resource =
3982                         container_of(entry, typeof(*resource), entry);
3983
3984         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
3985 }
3986
3987 /**
3988  * Find existing push VLAN resource or create and register a new one.
3989  *
3990  * @param[in, out] dev
3991  *   Pointer to rte_eth_dev structure.
3992  * @param[in, out] ref
3993  *   Pointer to push VLAN action resource reference.
3994  * @param[in, out] dev_flow
3995  *   Pointer to the dev_flow.
3996  * @param[out] error
3997  *   Pointer to error structure.
3998  *
3999  * @return
4000  *   0 on success, otherwise -errno and rte_errno is set.
4001  */
4002 static int
4003 flow_dv_push_vlan_action_resource_register
4004                        (struct rte_eth_dev *dev,
4005                         struct mlx5_flow_dv_push_vlan_action_resource *ref,
4006                         struct mlx5_flow *dev_flow,
4007                         struct rte_flow_error *error)
4008 {
4009         struct mlx5_priv *priv = dev->data->dev_private;
4010         struct mlx5_flow_dv_push_vlan_action_resource *resource;
4011         struct mlx5_list_entry *entry;
4012         struct mlx5_flow_cb_ctx ctx = {
4013                 .error = error,
4014                 .data = ref,
4015         };
4016
4017         entry = mlx5_list_register(&priv->sh->push_vlan_action_list, &ctx);
4018         if (!entry)
4019                 return -rte_errno;
4020         resource = container_of(entry, typeof(*resource), entry);
4021
4022         dev_flow->handle->dvh.rix_push_vlan = resource->idx;
4023         dev_flow->dv.push_vlan_res = resource;
4024         return 0;
4025 }
4026
4027 /**
4028  * Get the header size of a specific rte_flow_item_type.
4029  *
4030  * @param[in] item_type
4031  *   Tested rte_flow_item_type.
4032  *
4033  * @return
4034  *   Size of the item type header in bytes, 0 if void or irrelevant.
4035  */
4036 static size_t
4037 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
4038 {
4039         size_t retval;
4040
4041         switch (item_type) {
4042         case RTE_FLOW_ITEM_TYPE_ETH:
4043                 retval = sizeof(struct rte_ether_hdr);
4044                 break;
4045         case RTE_FLOW_ITEM_TYPE_VLAN:
4046                 retval = sizeof(struct rte_vlan_hdr);
4047                 break;
4048         case RTE_FLOW_ITEM_TYPE_IPV4:
4049                 retval = sizeof(struct rte_ipv4_hdr);
4050                 break;
4051         case RTE_FLOW_ITEM_TYPE_IPV6:
4052                 retval = sizeof(struct rte_ipv6_hdr);
4053                 break;
4054         case RTE_FLOW_ITEM_TYPE_UDP:
4055                 retval = sizeof(struct rte_udp_hdr);
4056                 break;
4057         case RTE_FLOW_ITEM_TYPE_TCP:
4058                 retval = sizeof(struct rte_tcp_hdr);
4059                 break;
4060         case RTE_FLOW_ITEM_TYPE_VXLAN:
4061         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4062                 retval = sizeof(struct rte_vxlan_hdr);
4063                 break;
4064         case RTE_FLOW_ITEM_TYPE_GRE:
4065         case RTE_FLOW_ITEM_TYPE_NVGRE:
4066                 retval = sizeof(struct rte_gre_hdr);
4067                 break;
4068         case RTE_FLOW_ITEM_TYPE_MPLS:
4069                 retval = sizeof(struct rte_mpls_hdr);
4070                 break;
4071         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
4072         default:
4073                 retval = 0;
4074                 break;
4075         }
4076         return retval;
4077 }
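
/*
 * Worked example: an ETH / IPV4 / UDP / VXLAN encapsulation pattern
 * accumulates 14 + 20 + 8 + 8 = 50 bytes of headers, which must fit in
 * MLX5_ENCAP_MAX_LEN when flow_dv_convert_encap_data() serializes it.
 */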
4078
4079 #define MLX5_ENCAP_IPV4_VERSION         0x40
4080 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
4081 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
4082 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
4083 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
4084 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
4085 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
4086
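/*
 * These defaults complete fields the application left as zero: an unset
 * IPv4 version_ihl byte becomes 0x40 | 0x05 = 0x45 (version 4, 20-byte
 * header) and the VXLAN flags word 0x08000000 sets the I bit that marks
 * the VNI as valid.
 */
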
4087 /**
4088  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
4089  *
4090  * @param[in] items
4091  *   Pointer to rte_flow_item objects list.
4092  * @param[out] buf
4093  *   Pointer to the output buffer.
4094  * @param[out] size
4095  *   Pointer to the output buffer size.
4096  * @param[out] error
4097  *   Pointer to the error structure.
4098  *
4099  * @return
4100  *   0 on success, a negative errno value otherwise and rte_errno is set.
4101  */
4102 static int
4103 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
4104                            size_t *size, struct rte_flow_error *error)
4105 {
4106         struct rte_ether_hdr *eth = NULL;
4107         struct rte_vlan_hdr *vlan = NULL;
4108         struct rte_ipv4_hdr *ipv4 = NULL;
4109         struct rte_ipv6_hdr *ipv6 = NULL;
4110         struct rte_udp_hdr *udp = NULL;
4111         struct rte_vxlan_hdr *vxlan = NULL;
4112         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
4113         struct rte_gre_hdr *gre = NULL;
4114         size_t len;
4115         size_t temp_size = 0;
4116
4117         if (!items)
4118                 return rte_flow_error_set(error, EINVAL,
4119                                           RTE_FLOW_ERROR_TYPE_ACTION,
4120                                           NULL, "invalid empty data");
4121         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4122                 len = flow_dv_get_item_hdr_len(items->type);
4123                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
4124                         return rte_flow_error_set(error, EINVAL,
4125                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4126                                                   (void *)items->type,
4127                                                   "items total size is too big"
4128                                                   " for encap action");
4129                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
4130                 switch (items->type) {
4131                 case RTE_FLOW_ITEM_TYPE_ETH:
4132                         eth = (struct rte_ether_hdr *)&buf[temp_size];
4133                         break;
4134                 case RTE_FLOW_ITEM_TYPE_VLAN:
4135                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
4136                         if (!eth)
4137                                 return rte_flow_error_set(error, EINVAL,
4138                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4139                                                 (void *)items->type,
4140                                                 "eth header not found");
4141                         if (!eth->ether_type)
4142                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
4143                         break;
4144                 case RTE_FLOW_ITEM_TYPE_IPV4:
4145                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
4146                         if (!vlan && !eth)
4147                                 return rte_flow_error_set(error, EINVAL,
4148                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4149                                                 (void *)items->type,
4150                                                 "neither eth nor vlan"
4151                                                 " header found");
4152                         if (vlan && !vlan->eth_proto)
4153                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4154                         else if (eth && !eth->ether_type)
4155                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
4156                         if (!ipv4->version_ihl)
4157                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
4158                                                     MLX5_ENCAP_IPV4_IHL_MIN;
4159                         if (!ipv4->time_to_live)
4160                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
4161                         break;
4162                 case RTE_FLOW_ITEM_TYPE_IPV6:
4163                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
4164                         if (!vlan && !eth)
4165                                 return rte_flow_error_set(error, EINVAL,
4166                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4167                                                 (void *)items->type,
4168                                                 "neither eth nor vlan"
4169                                                 " header found");
4170                         if (vlan && !vlan->eth_proto)
4171                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4172                         else if (eth && !eth->ether_type)
4173                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
4174                         if (!ipv6->vtc_flow)
4175                                 ipv6->vtc_flow =
4176                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
4177                         if (!ipv6->hop_limits)
4178                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
4179                         break;
4180                 case RTE_FLOW_ITEM_TYPE_UDP:
4181                         udp = (struct rte_udp_hdr *)&buf[temp_size];
4182                         if (!ipv4 && !ipv6)
4183                                 return rte_flow_error_set(error, EINVAL,
4184                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4185                                                 (void *)items->type,
4186                                                 "ip header not found");
4187                         if (ipv4 && !ipv4->next_proto_id)
4188                                 ipv4->next_proto_id = IPPROTO_UDP;
4189                         else if (ipv6 && !ipv6->proto)
4190                                 ipv6->proto = IPPROTO_UDP;
4191                         break;
4192                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4193                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
4194                         if (!udp)
4195                                 return rte_flow_error_set(error, EINVAL,
4196                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4197                                                 (void *)items->type,
4198                                                 "udp header not found");
4199                         if (!udp->dst_port)
4200                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
4201                         if (!vxlan->vx_flags)
4202                                 vxlan->vx_flags =
4203                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
4204                         break;
4205                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4206                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
4207                         if (!udp)
4208                                 return rte_flow_error_set(error, EINVAL,
4209                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4210                                                 (void *)items->type,
4211                                                 "udp header not found");
4212                         if (!vxlan_gpe->proto)
4213                                 return rte_flow_error_set(error, EINVAL,
4214                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4215                                                 (void *)items->type,
4216                                                 "next protocol not found");
4217                         if (!udp->dst_port)
4218                                 udp->dst_port =
4219                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
4220                         if (!vxlan_gpe->vx_flags)
4221                                 vxlan_gpe->vx_flags =
4222                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
4223                         break;
4224                 case RTE_FLOW_ITEM_TYPE_GRE:
4225                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4226                         gre = (struct rte_gre_hdr *)&buf[temp_size];
4227                         if (!gre->proto)
4228                                 return rte_flow_error_set(error, EINVAL,
4229                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4230                                                 (void *)items->type,
4231                                                 "next protocol not found");
4232                         if (!ipv4 && !ipv6)
4233                                 return rte_flow_error_set(error, EINVAL,
4234                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4235                                                 (void *)items->type,
4236                                                 "ip header not found");
4237                         if (ipv4 && !ipv4->next_proto_id)
4238                                 ipv4->next_proto_id = IPPROTO_GRE;
4239                         else if (ipv6 && !ipv6->proto)
4240                                 ipv6->proto = IPPROTO_GRE;
4241                         break;
4242                 case RTE_FLOW_ITEM_TYPE_VOID:
4243                         break;
4244                 default:
4245                         return rte_flow_error_set(error, EINVAL,
4246                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4247                                                   (void *)items->type,
4248                                                   "unsupported item type");
4249                         break;
4250                 }
4251                 temp_size += len;
4252         }
4253         *size = temp_size;
4254         return 0;
4255 }
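
/*
 * A minimal sketch (addresses and VNI hypothetical) of an item list this
 * function serializes; unset fields such as the UDP destination port or
 * the VXLAN flags receive the defaults patched in above:
 *
 *   struct rte_flow_item_eth eth = {
 *           .type = RTE_BE16(RTE_ETHER_TYPE_IPV4) };
 *   struct rte_flow_item_ipv4 ip = {
 *           .hdr.dst_addr = RTE_BE32(0xc0a80001) };
 *   struct rte_flow_item_udp udp = { 0 };
 *   struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
 *   const struct rte_flow_item items[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */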
4256
4257 static int
4258 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
4259 {
4260         struct rte_ether_hdr *eth = NULL;
4261         struct rte_vlan_hdr *vlan = NULL;
4262         struct rte_ipv6_hdr *ipv6 = NULL;
4263         struct rte_udp_hdr *udp = NULL;
4264         char *next_hdr;
4265         uint16_t proto;
4266
4267         eth = (struct rte_ether_hdr *)data;
4268         next_hdr = (char *)(eth + 1);
4269         proto = RTE_BE16(eth->ether_type);
4270
4271         /* VLAN skipping */
4272         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
4273                 vlan = (struct rte_vlan_hdr *)next_hdr;
4274                 proto = RTE_BE16(vlan->eth_proto);
4275                 next_hdr += sizeof(struct rte_vlan_hdr);
4276         }
4277
4278         /* HW calculates the IPv4 csum, no need to proceed. */
4279         if (proto == RTE_ETHER_TYPE_IPV4)
4280                 return 0;
4281
4282         /* Non-IPv4/IPv6 header, not supported. */
4283         if (proto != RTE_ETHER_TYPE_IPV6) {
4284                 return rte_flow_error_set(error, ENOTSUP,
4285                                           RTE_FLOW_ERROR_TYPE_ACTION,
4286                                           NULL, "Cannot offload non IPv4/IPv6");
4287         }
4288
4289         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
4290
4291         /* Ignore non-UDP protocols. */
4292         if (ipv6->proto != IPPROTO_UDP)
4293                 return 0;
4294
4295         udp = (struct rte_udp_hdr *)(ipv6 + 1);
4296         udp->dgram_cksum = 0;
4297
4298         return 0;
4299 }
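
/*
 * Rationale: the hardware recomputes the IPv4 checksum on encap but
 * cannot produce an IPv6 UDP checksum, and RFC 6935 permits a zero UDP
 * checksum for tunnel encapsulations, so the checksum is cleared here
 * rather than rejecting the encap data.
 */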
4300
4301 /**
4302  * Convert L2 encap action to DV specification.
4303  *
4304  * @param[in] dev
4305  *   Pointer to rte_eth_dev structure.
4306  * @param[in] action
4307  *   Pointer to action structure.
4308  * @param[in, out] dev_flow
4309  *   Pointer to the mlx5_flow.
4310  * @param[in] transfer
4311  *   Mark if the flow is E-Switch flow.
4312  * @param[out] error
4313  *   Pointer to the error structure.
4314  *
4315  * @return
4316  *   0 on success, a negative errno value otherwise and rte_errno is set.
4317  */
4318 static int
4319 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
4320                                const struct rte_flow_action *action,
4321                                struct mlx5_flow *dev_flow,
4322                                uint8_t transfer,
4323                                struct rte_flow_error *error)
4324 {
4325         const struct rte_flow_item *encap_data;
4326         const struct rte_flow_action_raw_encap *raw_encap_data;
4327         struct mlx5_flow_dv_encap_decap_resource res = {
4328                 .reformat_type =
4329                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
4330                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4331                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
4332         };
4333
4334         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4335                 raw_encap_data =
4336                         (const struct rte_flow_action_raw_encap *)action->conf;
4337                 res.size = raw_encap_data->size;
4338                 memcpy(res.buf, raw_encap_data->data, res.size);
4339         } else {
4340                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
4341                         encap_data =
4342                                 ((const struct rte_flow_action_vxlan_encap *)
4343                                                 action->conf)->definition;
4344                 else
4345                         encap_data =
4346                                 ((const struct rte_flow_action_nvgre_encap *)
4347                                                 action->conf)->definition;
4348                 if (flow_dv_convert_encap_data(encap_data, res.buf,
4349                                                &res.size, error))
4350                         return -rte_errno;
4351         }
4352         if (flow_dv_zero_encap_udp_csum(res.buf, error))
4353                 return -rte_errno;
4354         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4355                 return rte_flow_error_set(error, EINVAL,
4356                                           RTE_FLOW_ERROR_TYPE_ACTION,
4357                                           NULL, "can't create L2 encap action");
4358         return 0;
4359 }
4360
4361 /**
4362  * Convert L2 decap action to DV specification.
4363  *
4364  * @param[in] dev
4365  *   Pointer to rte_eth_dev structure.
4366  * @param[in, out] dev_flow
4367  *   Pointer to the mlx5_flow.
4368  * @param[in] transfer
4369  *   Mark if the flow is E-Switch flow.
4370  * @param[out] error
4371  *   Pointer to the error structure.
4372  *
4373  * @return
4374  *   0 on success, a negative errno value otherwise and rte_errno is set.
4375  */
4376 static int
4377 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
4378                                struct mlx5_flow *dev_flow,
4379                                uint8_t transfer,
4380                                struct rte_flow_error *error)
4381 {
4382         struct mlx5_flow_dv_encap_decap_resource res = {
4383                 .size = 0,
4384                 .reformat_type =
4385                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
4386                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
4387                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
4388         };
4389
4390         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4391                 return rte_flow_error_set(error, EINVAL,
4392                                           RTE_FLOW_ERROR_TYPE_ACTION,
4393                                           NULL, "can't create L2 decap action");
4394         return 0;
4395 }
4396
4397 /**
4398  * Convert raw decap/encap (L3 tunnel) action to DV specification.
4399  *
4400  * @param[in] dev
4401  *   Pointer to rte_eth_dev structure.
4402  * @param[in] action
4403  *   Pointer to action structure.
4404  * @param[in, out] dev_flow
4405  *   Pointer to the mlx5_flow.
4406  * @param[in] attr
4407  *   Pointer to the flow attributes.
4408  * @param[out] error
4409  *   Pointer to the error structure.
4410  *
4411  * @return
4412  *   0 on success, a negative errno value otherwise and rte_errno is set.
4413  */
4414 static int
4415 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
4416                                 const struct rte_flow_action *action,
4417                                 struct mlx5_flow *dev_flow,
4418                                 const struct rte_flow_attr *attr,
4419                                 struct rte_flow_error *error)
4420 {
4421         const struct rte_flow_action_raw_encap *encap_data;
4422         struct mlx5_flow_dv_encap_decap_resource res;
4423
4424         memset(&res, 0, sizeof(res));
4425         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
4426         res.size = encap_data->size;
4427         memcpy(res.buf, encap_data->data, res.size);
4428         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4429                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
4430                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
4431         if (attr->transfer)
4432                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4433         else
4434                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4435                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4436         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
4437                 return rte_flow_error_set(error, EINVAL,
4438                                           RTE_FLOW_ERROR_TYPE_ACTION,
4439                                           NULL, "can't create encap action");
4440         return 0;
4441 }
4442
4443 /**
4444  * Create push VLAN action.
4445  *
4446  * @param[in] dev
4447  *   Pointer to rte_eth_dev structure.
4448  * @param[in] attr
4449  *   Pointer to the flow attributes.
4450  * @param[in] vlan
4451  *   Pointer to the vlan to push to the Ethernet header.
4452  * @param[in, out] dev_flow
4453  *   Pointer to the mlx5_flow.
4454  * @param[out] error
4455  *   Pointer to the error structure.
4456  *
4457  * @return
4458  *   0 on success, a negative errno value otherwise and rte_errno is set.
4459  */
4460 static int
4461 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
4462                                 const struct rte_flow_attr *attr,
4463                                 const struct rte_vlan_hdr *vlan,
4464                                 struct mlx5_flow *dev_flow,
4465                                 struct rte_flow_error *error)
4466 {
4467         struct mlx5_flow_dv_push_vlan_action_resource res;
4468
4469         memset(&res, 0, sizeof(res));
4470         res.vlan_tag =
4471                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
4472                                  vlan->vlan_tci);
4473         if (attr->transfer)
4474                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4475         else
4476                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4477                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
4478         return flow_dv_push_vlan_action_resource_register
4479                                             (dev, &res, dev_flow, error);
4480 }
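
/*
 * Example of the tag layout: pushing TPID 0x8100 with PCP 3 and VID 5
 * packs to rte_cpu_to_be_32(0x8100 << 16 | (3 << 13) | 5), i.e. the
 * big-endian word 0x81006005.
 */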
4481
4482 /**
4483  * Validate the modify-header actions.
4484  *
4485  * @param[in] action_flags
4486  *   Holds the actions detected until now.
4487  * @param[in] action
4488  *   Pointer to the modify action.
4489  * @param[out] error
4490  *   Pointer to error structure.
4491  *
4492  * @return
4493  *   0 on success, a negative errno value otherwise and rte_errno is set.
4494  */
4495 static int
4496 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
4497                                    const struct rte_flow_action *action,
4498                                    struct rte_flow_error *error)
4499 {
4500         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
4501                 return rte_flow_error_set(error, EINVAL,
4502                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4503                                           NULL, "action configuration not set");
4504         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
4505                 return rte_flow_error_set(error, EINVAL,
4506                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4507                                           "can't have encap action before"
4508                                           " modify action");
4509         return 0;
4510 }
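
/*
 * Example of a rejected ordering (testpmd-style notation): "actions
 * raw_encap ... / set_ipv4_src ..." fails here because the headers to
 * be rewritten no longer exist once the packet is encapsulated;
 * modify-header actions must precede any encap action.
 */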
4511
4512 /**
4513  * Validate the modify-header MAC address actions.
4514  *
4515  * @param[in] action_flags
4516  *   Holds the actions detected until now.
4517  * @param[in] action
4518  *   Pointer to the modify action.
4519  * @param[in] item_flags
4520  *   Holds the items detected.
4521  * @param[out] error
4522  *   Pointer to error structure.
4523  *
4524  * @return
4525  *   0 on success, a negative errno value otherwise and rte_errno is set.
4526  */
4527 static int
4528 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
4529                                    const struct rte_flow_action *action,
4530                                    const uint64_t item_flags,
4531                                    struct rte_flow_error *error)
4532 {
4533         int ret = 0;
4534
4535         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4536         if (!ret) {
4537                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
4538                         return rte_flow_error_set(error, EINVAL,
4539                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4540                                                   NULL,
4541                                                   "no L2 item in pattern");
4542         }
4543         return ret;
4544 }
4545
4546 /**
4547  * Validate the modify-header IPv4 address actions.
4548  *
4549  * @param[in] action_flags
4550  *   Holds the actions detected until now.
4551  * @param[in] action
4552  *   Pointer to the modify action.
4553  * @param[in] item_flags
4554  *   Holds the items detected.
4555  * @param[out] error
4556  *   Pointer to error structure.
4557  *
4558  * @return
4559  *   0 on success, a negative errno value otherwise and rte_errno is set.
4560  */
4561 static int
4562 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
4563                                     const struct rte_flow_action *action,
4564                                     const uint64_t item_flags,
4565                                     struct rte_flow_error *error)
4566 {
4567         int ret = 0;
4568         uint64_t layer;
4569
4570         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4571         if (!ret) {
4572                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4573                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4574                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4575                 if (!(item_flags & layer))
4576                         return rte_flow_error_set(error, EINVAL,
4577                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4578                                                   NULL,
4579                                                   "no ipv4 item in pattern");
4580         }
4581         return ret;
4582 }
4583
4584 /**
4585  * Validate the modify-header IPv6 address actions.
4586  *
4587  * @param[in] action_flags
4588  *   Holds the actions detected until now.
4589  * @param[in] action
4590  *   Pointer to the modify action.
4591  * @param[in] item_flags
4592  *   Holds the items detected.
4593  * @param[out] error
4594  *   Pointer to error structure.
4595  *
4596  * @return
4597  *   0 on success, a negative errno value otherwise and rte_errno is set.
4598  */
4599 static int
4600 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
4601                                     const struct rte_flow_action *action,
4602                                     const uint64_t item_flags,
4603                                     struct rte_flow_error *error)
4604 {
4605         int ret = 0;
4606         uint64_t layer;
4607
4608         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4609         if (!ret) {
4610                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4611                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4612                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4613                 if (!(item_flags & layer))
4614                         return rte_flow_error_set(error, EINVAL,
4615                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4616                                                   NULL,
4617                                                   "no ipv6 item in pattern");
4618         }
4619         return ret;
4620 }
4621
4622 /**
4623  * Validate the modify-header TP actions.
4624  *
4625  * @param[in] action_flags
4626  *   Holds the actions detected until now.
4627  * @param[in] action
4628  *   Pointer to the modify action.
4629  * @param[in] item_flags
4630  *   Holds the items detected.
4631  * @param[out] error
4632  *   Pointer to error structure.
4633  *
4634  * @return
4635  *   0 on success, a negative errno value otherwise and rte_errno is set.
4636  */
4637 static int
4638 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
4639                                   const struct rte_flow_action *action,
4640                                   const uint64_t item_flags,
4641                                   struct rte_flow_error *error)
4642 {
4643         int ret = 0;
4644         uint64_t layer;
4645
4646         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4647         if (!ret) {
4648                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4649                                  MLX5_FLOW_LAYER_INNER_L4 :
4650                                  MLX5_FLOW_LAYER_OUTER_L4;
4651                 if (!(item_flags & layer))
4652                         return rte_flow_error_set(error, EINVAL,
4653                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4654                                                   NULL, "no transport layer "
4655                                                   "in pattern");
4656         }
4657         return ret;
4658 }
4659
4660 /**
4661  * Validate the modify-header actions of increment/decrement
4662  * TCP Sequence-number.
4663  *
4664  * @param[in] action_flags
4665  *   Holds the actions detected until now.
4666  * @param[in] action
4667  *   Pointer to the modify action.
4668  * @param[in] item_flags
4669  *   Holds the items detected.
4670  * @param[out] error
4671  *   Pointer to error structure.
4672  *
4673  * @return
4674  *   0 on success, a negative errno value otherwise and rte_errno is set.
4675  */
4676 static int
4677 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
4678                                        const struct rte_flow_action *action,
4679                                        const uint64_t item_flags,
4680                                        struct rte_flow_error *error)
4681 {
4682         int ret = 0;
4683         uint64_t layer;
4684
4685         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4686         if (!ret) {
4687                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4688                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4689                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4690                 if (!(item_flags & layer))
4691                         return rte_flow_error_set(error, EINVAL,
4692                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4693                                                   NULL, "no TCP item in"
4694                                                   " pattern");
4695                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
4696                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
4697                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
4698                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
4699                         return rte_flow_error_set(error, EINVAL,
4700                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4701                                                   NULL,
4702                                                   "cannot decrease and increase"
4703                                                   " TCP sequence number"
4704                                                   " at the same time");
4705         }
4706         return ret;
4707 }
4708
4709 /**
4710  * Validate the modify-header actions of increment/decrement
4711  * TCP Acknowledgment number.
4712  *
4713  * @param[in] action_flags
4714  *   Holds the actions detected until now.
4715  * @param[in] action
4716  *   Pointer to the modify action.
4717  * @param[in] item_flags
4718  *   Holds the items detected.
4719  * @param[out] error
4720  *   Pointer to error structure.
4721  *
4722  * @return
4723  *   0 on success, a negative errno value otherwise and rte_errno is set.
4724  */
4725 static int
4726 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
4727                                        const struct rte_flow_action *action,
4728                                        const uint64_t item_flags,
4729                                        struct rte_flow_error *error)
4730 {
4731         int ret = 0;
4732         uint64_t layer;
4733
4734         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4735         if (!ret) {
4736                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4737                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
4738                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
4739                 if (!(item_flags & layer))
4740                         return rte_flow_error_set(error, EINVAL,
4741                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4742                                                   NULL, "no TCP item in"
4743                                                   " pattern");
4744                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
4745                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
4746                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
4747                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
4748                         return rte_flow_error_set(error, EINVAL,
4749                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4750                                                   NULL,
4751                                                   "cannot decrease and increase"
4752                                                   " TCP acknowledgment number"
4753                                                   " at the same time");
4754         }
4755         return ret;
4756 }
4757
4758 /**
4759  * Validate the modify-header TTL actions.
4760  *
4761  * @param[in] action_flags
4762  *   Holds the actions detected until now.
4763  * @param[in] action
4764  *   Pointer to the modify action.
4765  * @param[in] item_flags
4766  *   Holds the items detected.
4767  * @param[out] error
4768  *   Pointer to error structure.
4769  *
4770  * @return
4771  *   0 on success, a negative errno value otherwise and rte_errno is set.
4772  */
4773 static int
4774 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
4775                                    const struct rte_flow_action *action,
4776                                    const uint64_t item_flags,
4777                                    struct rte_flow_error *error)
4778 {
4779         int ret = 0;
4780         uint64_t layer;
4781
4782         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4783         if (!ret) {
4784                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
4785                                  MLX5_FLOW_LAYER_INNER_L3 :
4786                                  MLX5_FLOW_LAYER_OUTER_L3;
4787                 if (!(item_flags & layer))
4788                         return rte_flow_error_set(error, EINVAL,
4789                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4790                                                   NULL,
4791                                                   "no IP protocol in pattern");
4792         }
4793         return ret;
4794 }
4795
4796 /**
4797  * Validate the generic modify field actions.
4798  * @param[in] dev
4799  *   Pointer to the rte_eth_dev structure.
4800  * @param[in] action_flags
4801  *   Holds the actions detected until now.
4802  * @param[in] action
4803  *   Pointer to the modify action.
4804  * @param[in] attr
4805  *   Pointer to the flow attributes.
4806  * @param[out] error
4807  *   Pointer to error structure.
4808  *
4809  * @return
4810  *   Number of header fields to modify (0 or more) on success,
4811  *   a negative errno value otherwise and rte_errno is set.
4812  */
4813 static int
4814 flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
4815                                    const uint64_t action_flags,
4816                                    const struct rte_flow_action *action,
4817                                    const struct rte_flow_attr *attr,
4818                                    struct rte_flow_error *error)
4819 {
4820         int ret = 0;
4821         struct mlx5_priv *priv = dev->data->dev_private;
4822         struct mlx5_dev_config *config = &priv->config;
4823         const struct rte_flow_action_modify_field *action_modify_field =
4824                 action->conf;
4825         uint32_t dst_width = mlx5_flow_item_field_width(config,
4826                                 action_modify_field->dst.field);
4827         uint32_t src_width = mlx5_flow_item_field_width(config,
4828                                 action_modify_field->src.field);
4829
4830         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4831         if (ret)
4832                 return ret;
4833
4834         if (action_modify_field->width == 0)
4835                 return rte_flow_error_set(error, EINVAL,
4836                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4837                                 "no bits are requested to be modified");
4838         else if (action_modify_field->width > dst_width ||
4839                  action_modify_field->width > src_width)
4840                 return rte_flow_error_set(error, EINVAL,
4841                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4842                                 "cannot modify more bits than"
4843                                 " the width of a field");
4844         if (action_modify_field->dst.field != RTE_FLOW_FIELD_VALUE &&
4845             action_modify_field->dst.field != RTE_FLOW_FIELD_POINTER) {
4846                 if ((action_modify_field->dst.offset +
4847                      action_modify_field->width > dst_width) ||
4848                     (action_modify_field->dst.offset % 32))
4849                         return rte_flow_error_set(error, EINVAL,
4850                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4851                                         "destination offset is too big"
4852                                         " or not aligned to 4 bytes");
4853                 if (action_modify_field->dst.level &&
4854                     action_modify_field->dst.field != RTE_FLOW_FIELD_TAG)
4855                         return rte_flow_error_set(error, ENOTSUP,
4856                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4857                                         "inner header fields modification"
4858                                         " is not supported");
4859         }
4860         if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
4861             action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
4862                 if (!attr->transfer && !attr->group)
4863                         return rte_flow_error_set(error, ENOTSUP,
4864                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4865                                         "modify field action is not"
4866                                         " supported for group 0");
4867                 if ((action_modify_field->src.offset +
4868                      action_modify_field->width > src_width) ||
4869                     (action_modify_field->src.offset % 32))
4870                         return rte_flow_error_set(error, EINVAL,
4871                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4872                                         "source offset is too big"
4873                                         " or not aligned to 4 bytes");
4874                 if (action_modify_field->src.level &&
4875                     action_modify_field->src.field != RTE_FLOW_FIELD_TAG)
4876                         return rte_flow_error_set(error, ENOTSUP,
4877                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4878                                         "inner header fields modification"
4879                                         " is not supported");
4880         }
4881         if ((action_modify_field->dst.field ==
4882              action_modify_field->src.field) &&
4883             (action_modify_field->dst.level ==
4884              action_modify_field->src.level))
4885                 return rte_flow_error_set(error, EINVAL,
4886                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4887                                 "source and destination fields"
4888                                 " cannot be the same");
4889         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VALUE ||
4890             action_modify_field->dst.field == RTE_FLOW_FIELD_POINTER)
4891                 return rte_flow_error_set(error, EINVAL,
4892                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4893                                 "immediate value or a pointer to it"
4894                                 " cannot be used as a destination");
4895         if (action_modify_field->dst.field == RTE_FLOW_FIELD_START ||
4896             action_modify_field->src.field == RTE_FLOW_FIELD_START)
4897                 return rte_flow_error_set(error, ENOTSUP,
4898                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4899                                 "modification of an arbitrary"
4900                                 " place in a packet is not supported");
4901         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
4902             action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
4903                 return rte_flow_error_set(error, ENOTSUP,
4904                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4905                                 "modification of the 802.1Q Tag"
4906                                 " Identifier is not supported");
4907         if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
4908             action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
4909                 return rte_flow_error_set(error, ENOTSUP,
4910                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4911                                 "modification of the VXLAN Network"
4912                                 " Identifier is not supported");
4913         if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
4914             action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
4915                 return rte_flow_error_set(error, ENOTSUP,
4916                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4917                                 "modification of the GENEVE Network"
4918                                 " Identifier is not supported");
4919         if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
4920             action_modify_field->src.field == RTE_FLOW_FIELD_MARK ||
4921             action_modify_field->dst.field == RTE_FLOW_FIELD_META ||
4922             action_modify_field->src.field == RTE_FLOW_FIELD_META) {
4923                 if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4924                     !mlx5_flow_ext_mreg_supported(dev))
4925                         return rte_flow_error_set(error, ENOTSUP,
4926                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
4927                                         "cannot modify mark or metadata without"
4928                                         " extended metadata register support");
4929         }
4930         if (action_modify_field->operation != RTE_FLOW_MODIFY_SET)
4931                 return rte_flow_error_set(error, ENOTSUP,
4932                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
4933                                 "add and sub operations"
4934                                 " are not supported");
4935         return (action_modify_field->width / 32) +
4936                !!(action_modify_field->width % 32);
4937 }
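/*
 * On success the validator above returns the number of 32-bit
 * modify-header actions the field copy consumes, i.e. ceil(width / 32).
 * For example, a 48-bit wide copy takes (48 / 32) + !!(48 % 32) = 2
 * actions.
 */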
4938
4939 /**
4940  * Validate jump action.
4941  *
4942  * @param[in] action
4943  *   Pointer to the jump action.
4944  * @param[in] action_flags
4945  *   Holds the actions detected until now.
4946  * @param[in] attributes
4947  *   Pointer to flow attributes
4948  * @param[in] external
4949  *   Whether the action belongs to a flow rule created by a request external to the PMD.
4950  * @param[out] error
4951  *   Pointer to error structure.
4952  *
4953  * @return
4954  *   0 on success, a negative errno value otherwise and rte_errno is set.
4955  */
4956 static int
4957 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4958                              const struct mlx5_flow_tunnel *tunnel,
4959                              const struct rte_flow_action *action,
4960                              uint64_t action_flags,
4961                              const struct rte_flow_attr *attributes,
4962                              bool external, struct rte_flow_error *error)
4963 {
4964         uint32_t target_group, table;
4965         int ret = 0;
4966         struct flow_grp_info grp_info = {
4967                 .external = !!external,
4968                 .transfer = !!attributes->transfer,
4969                 .fdb_def_rule = 1,
4970                 .std_tbl_fix = 0
4971         };
4972         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4973                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4974                 return rte_flow_error_set(error, EINVAL,
4975                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4976                                           "can't have 2 fate actions in"
4977                                           " the same flow");
4978         if (!action->conf)
4979                 return rte_flow_error_set(error, EINVAL,
4980                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4981                                           NULL, "action configuration not set");
4982         target_group =
4983                 ((const struct rte_flow_action_jump *)action->conf)->group;
4984         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4985                                        &grp_info, error);
4986         if (ret)
4987                 return ret;
4988         if (attributes->group == target_group &&
4989             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4990                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4991                 return rte_flow_error_set(error, EINVAL,
4992                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4993                                           "target group must be other than"
4994                                           " the current flow group");
4995         return 0;
4996 }
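/*
 * Illustrative usage sketch (group numbers are application-chosen):
 *
 *   struct rte_flow_action_jump jump = { .group = 2 };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_JUMP,
 *           .conf = &jump,
 *   };
 *
 * The validator rejects a jump to the current group unless a tunnel
 * set/match action is also present.
 */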
4997
4998 /**
4999  * Validate the port_id action.
5000  *
5001  * @param[in] dev
5002  *   Pointer to rte_eth_dev structure.
5003  * @param[in] action_flags
5004  *   Bit-fields that holds the actions detected until now.
5005  * @param[in] action
5006  *   Port_id RTE action structure.
5007  * @param[in] attr
5008  *   Attributes of flow that includes this action.
5009  * @param[out] error
5010  *   Pointer to error structure.
5011  *
5012  * @return
5013  *   0 on success, a negative errno value otherwise and rte_errno is set.
5014  */
5015 static int
5016 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
5017                                 uint64_t action_flags,
5018                                 const struct rte_flow_action *action,
5019                                 const struct rte_flow_attr *attr,
5020                                 struct rte_flow_error *error)
5021 {
5022         const struct rte_flow_action_port_id *port_id;
5023         struct mlx5_priv *act_priv;
5024         struct mlx5_priv *dev_priv;
5025         uint16_t port;
5026
5027         if (!attr->transfer)
5028                 return rte_flow_error_set(error, ENOTSUP,
5029                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5030                                           NULL,
5031                                           "port id action is valid in transfer"
5032                                           " mode only");
5033         if (!action || !action->conf)
5034                 return rte_flow_error_set(error, ENOTSUP,
5035                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
5036                                           NULL,
5037                                           "port id action parameters must be"
5038                                           " specified");
5039         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
5040                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5041                 return rte_flow_error_set(error, EINVAL,
5042                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5043                                           "can have only one fate action in"
5044                                           " a flow");
5045         dev_priv = mlx5_dev_to_eswitch_info(dev);
5046         if (!dev_priv)
5047                 return rte_flow_error_set(error, rte_errno,
5048                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5049                                           NULL,
5050                                           "failed to obtain E-Switch info");
5051         port_id = action->conf;
5052         port = port_id->original ? dev->data->port_id : port_id->id;
5053         act_priv = mlx5_port_to_eswitch_info(port, false);
5054         if (!act_priv)
5055                 return rte_flow_error_set
5056                                 (error, rte_errno,
5057                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
5058                                  "failed to obtain E-Switch port id for port");
5059         if (act_priv->domain_id != dev_priv->domain_id)
5060                 return rte_flow_error_set
5061                                 (error, EINVAL,
5062                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5063                                  "port does not belong to"
5064                                  " E-Switch being configured");
5065         return 0;
5066 }
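/*
 * Illustrative configuration accepted in transfer mode; peer_port_id is
 * a hypothetical application variable, not defined in this file:
 *
 *   struct rte_flow_action_port_id conf = {
 *           .original = 0,
 *           .id = peer_port_id,
 *   };
 *
 * Both ports must belong to the same E-Switch domain.
 */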
5067
5068 /**
5069  * Get the maximum number of modify header actions.
5070  *
5071  * @param dev
5072  *   Pointer to rte_eth_dev structure.
5073  * @param root
5074  *   Whether action is on root table.
5075  *
5076  * @return
5077  *   Max number of modify header actions device can support.
5078  */
5079 static inline unsigned int
5080 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
5081                               bool root)
5082 {
5083         /*
5084          * There's no way to directly query the max capacity from FW.
5085          * The maximal value on root table should be assumed to be supported.
5086          */
5087         if (!root)
5088                 return MLX5_MAX_MODIFY_NUM;
5089         else
5090                 return MLX5_ROOT_TBL_MODIFY_NUM;
5091 }
5092
5093 /**
5094  * Validate the meter action.
5095  *
5096  * @param[in] dev
5097  *   Pointer to rte_eth_dev structure.
5098  * @param[in] action_flags
5099  *   Bit-fields that holds the actions detected until now.
5100  * @param[in] action
5101  *   Pointer to the meter action.
5102  * @param[in] attr
5103  *   Attributes of flow that includes this action.
5104  * @param[in] port_id_item
5105  *   Pointer to item indicating port id.
5106  * @param[out] error
5107  *   Pointer to error structure.
5108  *
5109  * @return
5110  *   0 on success, a negative errno value otherwise and rte_errno is set.
5111  */
5112 static int
5113 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
5114                                 uint64_t action_flags,
5115                                 const struct rte_flow_action *action,
5116                                 const struct rte_flow_attr *attr,
5117                                 const struct rte_flow_item *port_id_item,
5118                                 bool *def_policy,
5119                                 struct rte_flow_error *error)
5120 {
5121         struct mlx5_priv *priv = dev->data->dev_private;
5122         const struct rte_flow_action_meter *am = action->conf;
5123         struct mlx5_flow_meter_info *fm;
5124         struct mlx5_flow_meter_policy *mtr_policy;
5125         struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
5126
5127         if (!am)
5128                 return rte_flow_error_set(error, EINVAL,
5129                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5130                                           "meter action conf is NULL");
5131
5132         if (action_flags & MLX5_FLOW_ACTION_METER)
5133                 return rte_flow_error_set(error, ENOTSUP,
5134                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5135                                           "meter chaining not supported");
5136         if (action_flags & MLX5_FLOW_ACTION_JUMP)
5137                 return rte_flow_error_set(error, ENOTSUP,
5138                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5139                                           "meter with jump not supported");
5140         if (!priv->mtr_en)
5141                 return rte_flow_error_set(error, ENOTSUP,
5142                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5143                                           NULL,
5144                                           "meter action not supported");
5145         fm = mlx5_flow_meter_find(priv, am->mtr_id, NULL);
5146         if (!fm)
5147                 return rte_flow_error_set(error, EINVAL,
5148                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5149                                           "Meter not found");
5150         /* ASO meter can always be shared by different domains. */
5151         if (fm->ref_cnt && !priv->sh->meter_aso_en &&
5152             !(fm->transfer == attr->transfer ||
5153               (!fm->ingress && !attr->ingress && attr->egress) ||
5154               (!fm->egress && !attr->egress && attr->ingress)))
5155                 return rte_flow_error_set(error, EINVAL,
5156                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5157                         "Flow attributes domain is either invalid "
5158                         "or has a domain conflict with current "
5159                         "meter attributes");
5160         if (fm->def_policy) {
5161                 if (!((attr->transfer &&
5162                         mtrmng->def_policy[MLX5_MTR_DOMAIN_TRANSFER]) ||
5163                         (attr->egress &&
5164                         mtrmng->def_policy[MLX5_MTR_DOMAIN_EGRESS]) ||
5165                         (attr->ingress &&
5166                         mtrmng->def_policy[MLX5_MTR_DOMAIN_INGRESS])))
5167                         return rte_flow_error_set(error, EINVAL,
5168                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5169                                           "Flow attributes domain "
5170                                           "has a conflict with current "
5171                                           "meter domain attributes");
5172                 *def_policy = true;
5173         } else {
5174                 mtr_policy = mlx5_flow_meter_policy_find(dev,
5175                                                 fm->policy_id, NULL);
5176                 if (!mtr_policy)
5177                         return rte_flow_error_set(error, EINVAL,
5178                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5179                                           "Invalid policy id for meter");
5180                 if (!((attr->transfer && mtr_policy->transfer) ||
5181                         (attr->egress && mtr_policy->egress) ||
5182                         (attr->ingress && mtr_policy->ingress)))
5183                         return rte_flow_error_set(error, EINVAL,
5184                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5185                                           "Flow attributes domain "
5186                                           "has a conflict with current "
5187                                           "meter domain attributes");
5188                 if (attr->transfer && mtr_policy->dev) {
5189                         /*
5190                          * When the policy has a port_id fate action,
5191                          * the flow must have the same src port as the policy.
5192                          */
5193                         struct mlx5_priv *policy_port_priv =
5194                                         mtr_policy->dev->data->dev_private;
5195                         int32_t flow_src_port = priv->representor_id;
5196
5197                         if (port_id_item) {
5198                                 const struct rte_flow_item_port_id *spec =
5199                                                         port_id_item->spec;
5200                                 struct mlx5_priv *port_priv =
5201                                         mlx5_port_to_eswitch_info(spec->id,
5202                                                                   false);
5203                                 if (!port_priv)
5204                                         return rte_flow_error_set(error,
5205                                                 rte_errno,
5206                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5207                                                 spec,
5208                                                 "Failed to get port info.");
5209                                 flow_src_port = port_priv->representor_id;
5210                         }
5211                         if (flow_src_port != policy_port_priv->representor_id)
5212                                 return rte_flow_error_set(error,
5213                                                 rte_errno,
5214                                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
5215                                                 NULL,
5216                                                 "Flow and meter policy "
5217                                                 "have different src port.");
5218                 }
5219                 *def_policy = false;
5220         }
5221         return 0;
5222 }
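/*
 * Illustrative usage sketch; the meter must exist beforehand, e.g.
 * created with rte_mtr_create(), and mtr_id 1 is only an example:
 *
 *   struct rte_flow_action_meter meter = { .mtr_id = 1 };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_METER,
 *           .conf = &meter,
 *   };
 */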
5223
5224 /**
5225  * Validate the age action.
5226  *
5227  * @param[in] action_flags
5228  *   Holds the actions detected until now.
5229  * @param[in] action
5230  *   Pointer to the age action.
5231  * @param[in] dev
5232  *   Pointer to the Ethernet device structure.
5233  * @param[out] error
5234  *   Pointer to error structure.
5235  *
5236  * @return
5237  *   0 on success, a negative errno value otherwise and rte_errno is set.
5238  */
5239 static int
5240 flow_dv_validate_action_age(uint64_t action_flags,
5241                             const struct rte_flow_action *action,
5242                             struct rte_eth_dev *dev,
5243                             struct rte_flow_error *error)
5244 {
5245         struct mlx5_priv *priv = dev->data->dev_private;
5246         const struct rte_flow_action_age *age = action->conf;
5247
5248         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
5249             !priv->sh->aso_age_mng))
5250                 return rte_flow_error_set(error, ENOTSUP,
5251                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5252                                           NULL,
5253                                           "age action not supported");
5254         if (!(action->conf))
5255                 return rte_flow_error_set(error, EINVAL,
5256                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5257                                           "configuration cannot be null");
5258         if (!(age->timeout))
5259                 return rte_flow_error_set(error, EINVAL,
5260                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5261                                           "invalid timeout value 0");
5262         if (action_flags & MLX5_FLOW_ACTION_AGE)
5263                 return rte_flow_error_set(error, EINVAL,
5264                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5265                                           "duplicate age actions set");
5266         return 0;
5267 }
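/*
 * Illustrative valid configuration (timeout is in seconds and must be
 * non-zero; context is an optional application cookie):
 *
 *   struct rte_flow_action_age age = {
 *           .timeout = 10,
 *           .context = NULL,
 *   };
 */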
5268
5269 /**
5270  * Validate the modify-header IPv4 DSCP actions.
5271  *
5272  * @param[in] action_flags
5273  *   Holds the actions detected until now.
5274  * @param[in] action
5275  *   Pointer to the modify action.
5276  * @param[in] item_flags
5277  *   Holds the items detected.
5278  * @param[out] error
5279  *   Pointer to error structure.
5280  *
5281  * @return
5282  *   0 on success, a negative errno value otherwise and rte_errno is set.
5283  */
5284 static int
5285 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
5286                                          const struct rte_flow_action *action,
5287                                          const uint64_t item_flags,
5288                                          struct rte_flow_error *error)
5289 {
5290         int ret = 0;
5291
5292         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5293         if (!ret) {
5294                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
5295                         return rte_flow_error_set(error, EINVAL,
5296                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5297                                                   NULL,
5298                                                   "no ipv4 item in pattern");
5299         }
5300         return ret;
5301 }
5302
5303 /**
5304  * Validate the modify-header IPv6 DSCP actions.
5305  *
5306  * @param[in] action_flags
5307  *   Holds the actions detected until now.
5308  * @param[in] action
5309  *   Pointer to the modify action.
5310  * @param[in] item_flags
5311  *   Holds the items detected.
5312  * @param[out] error
5313  *   Pointer to error structure.
5314  *
5315  * @return
5316  *   0 on success, a negative errno value otherwise and rte_errno is set.
5317  */
5318 static int
5319 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
5320                                          const struct rte_flow_action *action,
5321                                          const uint64_t item_flags,
5322                                          struct rte_flow_error *error)
5323 {
5324         int ret = 0;
5325
5326         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
5327         if (!ret) {
5328                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
5329                         return rte_flow_error_set(error, EINVAL,
5330                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5331                                                   NULL,
5332                                                   "no ipv6 item in pattern");
5333         }
5334         return ret;
5335 }
5336
5337 /**
5338  * Match modify-header resource.
5339  *
5340  * @param list
5341  *   Pointer to the hash list.
5342  * @param entry
5343  *   Pointer to exist resource entry object.
5344  * @param key
5345  *   Key of the new entry.
5346  * @param ctx
5347  *   Pointer to new modify-header resource.
5348  *
5349  * @return
5350  *   0 on matching, non-zero otherwise.
5351  */
5352 int
5353 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
5354                         struct mlx5_hlist_entry *entry,
5355                         uint64_t key __rte_unused, void *cb_ctx)
5356 {
5357         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5358         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5359         struct mlx5_flow_dv_modify_hdr_resource *resource =
5360                         container_of(entry, typeof(*resource), entry);
5361         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5362
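        /*
         * The comparable region starts at ft_type and runs through the
         * variable-length actions array, so the single memcmp() below
         * covers both the header fields and all actions.
         */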
5363         key_len += ref->actions_num * sizeof(ref->actions[0]);
5364         return ref->actions_num != resource->actions_num ||
5365                memcmp(&ref->ft_type, &resource->ft_type, key_len);
5366 }
5367
5368 struct mlx5_hlist_entry *
5369 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
5370                          void *cb_ctx)
5371 {
5372         struct mlx5_dev_ctx_shared *sh = list->ctx;
5373         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5374         struct mlx5dv_dr_domain *ns;
5375         struct mlx5_flow_dv_modify_hdr_resource *entry;
5376         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
5377         int ret;
5378         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
5379         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
5380
5381         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
5382                             SOCKET_ID_ANY);
5383         if (!entry) {
5384                 rte_flow_error_set(ctx->error, ENOMEM,
5385                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5386                                    "cannot allocate resource memory");
5387                 return NULL;
5388         }
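        /* Copy the key (ft_type onward) and the action data in one shot;
         * they are laid out contiguously in the reference resource. */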
5389         rte_memcpy(&entry->ft_type,
5390                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
5391                    key_len + data_len);
5392         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
5393                 ns = sh->fdb_domain;
5394         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
5395                 ns = sh->tx_domain;
5396         else
5397                 ns = sh->rx_domain;
5398         ret = mlx5_flow_os_create_flow_action_modify_header
5399                                         (sh->ctx, ns, entry,
5400                                          data_len, &entry->action);
5401         if (ret) {
5402                 mlx5_free(entry);
5403                 rte_flow_error_set(ctx->error, ENOMEM,
5404                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5405                                    NULL, "cannot create modification action");
5406                 return NULL;
5407         }
5408         return &entry->entry;
5409 }
5410
5411 /**
5412  * Validate the sample action.
5413  *
5414  * @param[in, out] action_flags
5415  *   Holds the actions detected until now.
5416  * @param[in] action
5417  *   Pointer to the sample action.
5418  * @param[in] dev
5419  *   Pointer to the Ethernet device structure.
5420  * @param[in] attr
5421  *   Attributes of flow that includes this action.
5422  * @param[in] item_flags
5423  *   Holds the items detected.
5424  * @param[in] rss
5425  *   Pointer to the RSS action.
5426  * @param[out] sample_rss
5427  *   Pointer to the RSS action in sample action list.
5428  * @param[out] count
5429  *   Pointer to the COUNT action in sample action list.
5430  * @param[out] fdb_mirror_limit
5431  *   Pointer to the FDB mirror limitation flag.
5432  * @param[out] error
5433  *   Pointer to error structure.
5434  *
5435  * @return
5436  *   0 on success, a negative errno value otherwise and rte_errno is set.
5437  */
5438 static int
5439 flow_dv_validate_action_sample(uint64_t *action_flags,
5440                                const struct rte_flow_action *action,
5441                                struct rte_eth_dev *dev,
5442                                const struct rte_flow_attr *attr,
5443                                uint64_t item_flags,
5444                                const struct rte_flow_action_rss *rss,
5445                                const struct rte_flow_action_rss **sample_rss,
5446                                const struct rte_flow_action_count **count,
5447                                int *fdb_mirror_limit,
5448                                struct rte_flow_error *error)
5449 {
5450         struct mlx5_priv *priv = dev->data->dev_private;
5451         struct mlx5_dev_config *dev_conf = &priv->config;
5452         const struct rte_flow_action_sample *sample = action->conf;
5453         const struct rte_flow_action *act;
5454         uint64_t sub_action_flags = 0;
5455         uint16_t queue_index = 0xFFFF;
5456         int actions_n = 0;
5457         int ret;
5458
5459         if (!sample)
5460                 return rte_flow_error_set(error, EINVAL,
5461                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5462                                           "configuration cannot be NULL");
5463         if (sample->ratio == 0)
5464                 return rte_flow_error_set(error, EINVAL,
5465                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5466                                           "ratio value starts from 1");
5467         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
5468                 return rte_flow_error_set(error, ENOTSUP,
5469                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5470                                           NULL,
5471                                           "sample action not supported");
5472         if (*action_flags & MLX5_FLOW_ACTION_SAMPLE)
5473                 return rte_flow_error_set(error, EINVAL,
5474                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5475                                           "Multiple sample actions not "
5476                                           "supported");
5477         if (*action_flags & MLX5_FLOW_ACTION_METER)
5478                 return rte_flow_error_set(error, EINVAL,
5479                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5480                                           "wrong action order, meter should "
5481                                           "be after sample action");
5482         if (*action_flags & MLX5_FLOW_ACTION_JUMP)
5483                 return rte_flow_error_set(error, EINVAL,
5484                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
5485                                           "wrong action order, jump should "
5486                                           "be after sample action");
5487         act = sample->actions;
5488         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
5489                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5490                         return rte_flow_error_set(error, ENOTSUP,
5491                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5492                                                   act, "too many actions");
5493                 switch (act->type) {
5494                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5495                         ret = mlx5_flow_validate_action_queue(act,
5496                                                               sub_action_flags,
5497                                                               dev,
5498                                                               attr, error);
5499                         if (ret < 0)
5500                                 return ret;
5501                         queue_index = ((const struct rte_flow_action_queue *)
5502                                                         (act->conf))->index;
5503                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
5504                         ++actions_n;
5505                         break;
5506                 case RTE_FLOW_ACTION_TYPE_RSS:
5507                         *sample_rss = act->conf;
5508                         ret = mlx5_flow_validate_action_rss(act,
5509                                                             sub_action_flags,
5510                                                             dev, attr,
5511                                                             item_flags,
5512                                                             error);
5513                         if (ret < 0)
5514                                 return ret;
5515                         if (rss && *sample_rss &&
5516                             ((*sample_rss)->level != rss->level ||
5517                             (*sample_rss)->types != rss->types))
5518                                 return rte_flow_error_set(error, ENOTSUP,
5519                                         RTE_FLOW_ERROR_TYPE_ACTION,
5520                                         NULL,
5521                                         "Can't use different RSS types "
5522                                         "or levels in the same flow");
5523                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
5524                                 queue_index = (*sample_rss)->queue[0];
5525                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
5526                         ++actions_n;
5527                         break;
5528                 case RTE_FLOW_ACTION_TYPE_MARK:
5529                         ret = flow_dv_validate_action_mark(dev, act,
5530                                                            sub_action_flags,
5531                                                            attr, error);
5532                         if (ret < 0)
5533                                 return ret;
5534                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
5535                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
5536                                                 MLX5_FLOW_ACTION_MARK_EXT;
5537                         else
5538                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
5539                         ++actions_n;
5540                         break;
5541                 case RTE_FLOW_ACTION_TYPE_COUNT:
5542                         ret = flow_dv_validate_action_count
5543                                 (dev, is_shared_action_count(act),
5544                                  *action_flags | sub_action_flags,
5545                                  error);
5546                         if (ret < 0)
5547                                 return ret;
5548                         *count = act->conf;
5549                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
5550                         *action_flags |= MLX5_FLOW_ACTION_COUNT;
5551                         ++actions_n;
5552                         break;
5553                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5554                         ret = flow_dv_validate_action_port_id(dev,
5555                                                               sub_action_flags,
5556                                                               act,
5557                                                               attr,
5558                                                               error);
5559                         if (ret)
5560                                 return ret;
5561                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5562                         ++actions_n;
5563                         break;
5564                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5565                         ret = flow_dv_validate_action_raw_encap_decap
5566                                 (dev, NULL, act->conf, attr, &sub_action_flags,
5567                                  &actions_n, action, item_flags, error);
5568                         if (ret < 0)
5569                                 return ret;
5570                         ++actions_n;
5571                         break;
5572                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5573                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5574                         ret = flow_dv_validate_action_l2_encap(dev,
5575                                                                sub_action_flags,
5576                                                                act, attr,
5577                                                                error);
5578                         if (ret < 0)
5579                                 return ret;
5580                         sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
5581                         ++actions_n;
5582                         break;
5583                 default:
5584                         return rte_flow_error_set(error, ENOTSUP,
5585                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5586                                                   NULL,
5587                                                   "Unsupported action in "
5588                                                   "sample sub-actions list");
5589                 }
5590         }
5591         if (attr->ingress && !attr->transfer) {
5592                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
5593                                           MLX5_FLOW_ACTION_RSS)))
5594                         return rte_flow_error_set(error, EINVAL,
5595                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5596                                                   NULL,
5597                                                   "Ingress must have a dest "
5598                                                   "QUEUE or RSS for sample");
5599         } else if (attr->egress && !attr->transfer) {
5600                 return rte_flow_error_set(error, ENOTSUP,
5601                                           RTE_FLOW_ERROR_TYPE_ACTION,
5602                                           NULL,
5603                                           "Sample only supports ingress "
5604                                           "or E-Switch");
5605         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
5606                 MLX5_ASSERT(attr->transfer);
5607                 if (sample->ratio > 1)
5608                         return rte_flow_error_set(error, ENOTSUP,
5609                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5610                                                   NULL,
5611                                                   "E-Switch doesn't support "
5612                                                   "any optional action "
5613                                                   "for sampling");
5614                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
5615                         return rte_flow_error_set(error, ENOTSUP,
5616                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5617                                                   NULL,
5618                                                   "unsupported action QUEUE");
5619                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
5620                         return rte_flow_error_set(error, ENOTSUP,
5621                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5622                                                   NULL,
5623                                                   "unsupported action RSS");
5624                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
5625                         return rte_flow_error_set(error, EINVAL,
5626                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5627                                                   NULL,
5628                                                   "E-Switch must have a dest "
5629                                                   "port for mirroring");
5630                 if (!priv->config.hca_attr.reg_c_preserve &&
5631                      priv->representor_id != UINT16_MAX)
5632                         *fdb_mirror_limit = 1;
5633         }
5634         /* Continue validation for Xcap actions. */
5635         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
5636             (queue_index == 0xFFFF ||
5637              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5638                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5639                      MLX5_FLOW_XCAP_ACTIONS)
5640                         return rte_flow_error_set(error, ENOTSUP,
5641                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5642                                                   NULL, "encap and decap "
5643                                                   "combination isn't "
5644                                                   "supported");
5645                 if (!attr->transfer && attr->ingress && (sub_action_flags &
5646                                                         MLX5_FLOW_ACTION_ENCAP))
5647                         return rte_flow_error_set(error, ENOTSUP,
5648                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5649                                                   NULL, "encap is not supported"
5650                                                   " for ingress traffic");
5651         }
5652         return 0;
5653 }
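/*
 * Illustrative sample action accepted on plain ingress (1-in-2 sampling,
 * sampled packets directed to queue 0):
 *
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action sub_acts[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_action_sample sample = {
 *           .ratio = 2,
 *           .actions = sub_acts,
 *   };
 */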
5654
5655 /**
5656  * Find existing modify-header resource or create and register a new one.
5657  *
5658  * @param[in, out] dev
5659  *   Pointer to rte_eth_dev structure.
5660  * @param[in, out] resource
5661  *   Pointer to modify-header resource.
5662  * @param[in, out] dev_flow
5663  *   Pointer to the dev_flow.
5664  * @param[out] error
5665  *   Pointer to error structure.
5666  *
5667  * @return
5668  *   0 on success, a negative errno value otherwise and rte_errno is set.
5669  */
5670 static int
5671 flow_dv_modify_hdr_resource_register
5672                         (struct rte_eth_dev *dev,
5673                          struct mlx5_flow_dv_modify_hdr_resource *resource,
5674                          struct mlx5_flow *dev_flow,
5675                          struct rte_flow_error *error)
5676 {
5677         struct mlx5_priv *priv = dev->data->dev_private;
5678         struct mlx5_dev_ctx_shared *sh = priv->sh;
5679         uint32_t key_len = sizeof(*resource) -
5680                            offsetof(typeof(*resource), ft_type) +
5681                            resource->actions_num * sizeof(resource->actions[0]);
5682         struct mlx5_hlist_entry *entry;
5683         struct mlx5_flow_cb_ctx ctx = {
5684                 .error = error,
5685                 .data = resource,
5686         };
5687         uint64_t key64;
5688
5689         resource->root = !dev_flow->dv.group;
5690         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
5691                                                                 resource->root))
5692                 return rte_flow_error_set(error, EOVERFLOW,
5693                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5694                                           "too many modify header items");
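        /*
         * The 64-bit hash key is a raw checksum over the comparable region
         * (ft_type through the actions array); exact matching on hash
         * collision is done by flow_dv_modify_match_cb().
         */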
5695         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
5696         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
5697         if (!entry)
5698                 return -rte_errno;
5699         resource = container_of(entry, typeof(*resource), entry);
5700         dev_flow->handle->dvh.modify_hdr = resource;
5701         return 0;
5702 }
5703
5704 /**
5705  * Get DV flow counter by index.
5706  *
5707  * @param[in] dev
5708  *   Pointer to the Ethernet device structure.
5709  * @param[in] idx
5710  *   mlx5 flow counter index in the container.
5711  * @param[out] ppool
5712  *   mlx5 flow counter pool in the container.
5713  *
5714  * @return
5715  *   Pointer to the counter, NULL otherwise.
5716  */
5717 static struct mlx5_flow_counter *
5718 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
5719                            uint32_t idx,
5720                            struct mlx5_flow_counter_pool **ppool)
5721 {
5722         struct mlx5_priv *priv = dev->data->dev_private;
5723         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5724         struct mlx5_flow_counter_pool *pool;
5725
5726         /* Decrease to original index and clear shared bit. */
5727         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
5728         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
5729         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
5730         MLX5_ASSERT(pool);
5731         if (ppool)
5732                 *ppool = pool;
5733         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
5734 }
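/*
 * Counter indexes handed out by this file are 1-based and may carry the
 * shared bit (MLX5_CNT_SHARED_OFFSET); the decode above is the inverse
 * of MLX5_MAKE_CNT_IDX() plus the shared-bit stripping.
 */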
5735
5736 /**
5737  * Check the devx counter belongs to the pool.
5738  *
5739  * @param[in] pool
5740  *   Pointer to the counter pool.
5741  * @param[in] id
5742  *   The counter devx ID.
5743  *
5744  * @return
5745  *   True if counter belongs to the pool, false otherwise.
5746  */
5747 static bool
5748 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
5749 {
5750         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
5751                    MLX5_COUNTERS_PER_POOL;
5752
5753         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
5754                 return true;
5755         return false;
5756 }
5757
5758 /**
5759  * Get a pool by devx counter ID.
5760  *
5761  * @param[in] cmng
5762  *   Pointer to the counter management.
5763  * @param[in] id
5764  *   The counter devx ID.
5765  *
5766  * @return
5767  *   The counter pool pointer if it exists, NULL otherwise.
5768  */
5769 static struct mlx5_flow_counter_pool *
5770 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
5771 {
5772         uint32_t i;
5773         struct mlx5_flow_counter_pool *pool = NULL;
5774
5775         rte_spinlock_lock(&cmng->pool_update_sl);
5776         /* Check last used pool. */
5777         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
5778             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
5779                 pool = cmng->pools[cmng->last_pool_idx];
5780                 goto out;
5781         }
5782         /* ID out of range means no suitable pool in the container. */
5783         if (id > cmng->max_id || id < cmng->min_id)
5784                 goto out;
5785         /*
5786          * Search the container from the end, since counter IDs are mostly
5787          * sequentially increasing and the last pool is most likely the
5788          * needed one.
5789          */
5790         i = cmng->n_valid;
5791         while (i--) {
5792                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
5793
5794                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
5795                         pool = pool_tmp;
5796                         break;
5797                 }
5798         }
5799 out:
5800         rte_spinlock_unlock(&cmng->pool_update_sl);
5801         return pool;
5802 }
5803
5804 /**
5805  * Resize a counter container.
5806  *
5807  * @param[in] dev
5808  *   Pointer to the Ethernet device structure.
5809  *
5810  * @return
5811  *   0 on success, otherwise negative errno value and rte_errno is set.
5812  */
5813 static int
5814 flow_dv_container_resize(struct rte_eth_dev *dev)
5815 {
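        /* Callers are expected to hold cmng->pool_update_sl (see
         * flow_dv_pool_create()), so swapping the pools array is safe. */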
5816         struct mlx5_priv *priv = dev->data->dev_private;
5817         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5818         void *old_pools = cmng->pools;
5819         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
5820         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
5821         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
5822
5823         if (!pools) {
5824                 rte_errno = ENOMEM;
5825                 return -ENOMEM;
5826         }
5827         if (old_pools)
5828                 memcpy(pools, old_pools, cmng->n *
5829                                        sizeof(struct mlx5_flow_counter_pool *));
5830         cmng->n = resize;
5831         cmng->pools = pools;
5832         if (old_pools)
5833                 mlx5_free(old_pools);
5834         return 0;
5835 }
5836
5837 /**
5838  * Query a devx flow counter.
5839  *
5840  * @param[in] dev
5841  *   Pointer to the Ethernet device structure.
5842  * @param[in] counter
5843  *   Index to the flow counter.
5844  * @param[out] pkts
5845  *   The statistics value of packets.
5846  * @param[out] bytes
5847  *   The statistics value of bytes.
5848  *
5849  * @return
5850  *   0 on success, otherwise a negative errno value and rte_errno is set.
5851  */
5852 static inline int
5853 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
5854                      uint64_t *bytes)
5855 {
5856         struct mlx5_priv *priv = dev->data->dev_private;
5857         struct mlx5_flow_counter_pool *pool = NULL;
5858         struct mlx5_flow_counter *cnt;
5859         int offset;
5860
5861         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5862         MLX5_ASSERT(pool);
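        /* In fallback mode each counter has its own DevX object, so query
         * the firmware directly; otherwise read the latest asynchronous
         * batch-query raw data under the pool lock. */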
5863         if (priv->sh->cmng.counter_fallback)
5864                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
5865                                         0, pkts, bytes, 0, NULL, NULL, 0);
5866         rte_spinlock_lock(&pool->sl);
5867         if (!pool->raw) {
5868                 *pkts = 0;
5869                 *bytes = 0;
5870         } else {
5871                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
5872                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
5873                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
5874         }
5875         rte_spinlock_unlock(&pool->sl);
5876         return 0;
5877 }
5878
5879 /**
5880  * Create and initialize a new counter pool.
5881  *
5882  * @param[in] dev
5883  *   Pointer to the Ethernet device structure.
5884  * @param[out] dcs
5885  *   The devX counter handle.
5886  * @param[in] age
5887  *   Whether the pool is for counters allocated for aging.
5890  *
5891  * @return
5892  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
5893  */
5894 static struct mlx5_flow_counter_pool *
5895 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
5896                     uint32_t age)
5897 {
5898         struct mlx5_priv *priv = dev->data->dev_private;
5899         struct mlx5_flow_counter_pool *pool;
5900         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5901         bool fallback = priv->sh->cmng.counter_fallback;
5902         uint32_t size = sizeof(*pool);
5903
5904         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
5905         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
5906         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
5907         if (!pool) {
5908                 rte_errno = ENOMEM;
5909                 return NULL;
5910         }
5911         pool->raw = NULL;
5912         pool->is_aged = !!age;
5913         pool->query_gen = 0;
5914         pool->min_dcs = dcs;
5915         rte_spinlock_init(&pool->sl);
5916         rte_spinlock_init(&pool->csl);
5917         TAILQ_INIT(&pool->counters[0]);
5918         TAILQ_INIT(&pool->counters[1]);
5919         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
5920         rte_spinlock_lock(&cmng->pool_update_sl);
5921         pool->index = cmng->n_valid;
5922         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
5923                 mlx5_free(pool);
5924                 rte_spinlock_unlock(&cmng->pool_update_sl);
5925                 return NULL;
5926         }
5927         cmng->pools[pool->index] = pool;
5928         cmng->n_valid++;
5929         if (unlikely(fallback)) {
5930                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
5931
5932                 if (base < cmng->min_id)
5933                         cmng->min_id = base;
5934                 if (base > cmng->max_id)
5935                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
5936                 cmng->last_pool_idx = pool->index;
5937         }
5938         rte_spinlock_unlock(&cmng->pool_update_sl);
5939         return pool;
5940 }
5941
5942 /**
5943  * Prepare a new counter and/or a new counter pool.
5944  *
5945  * @param[in] dev
5946  *   Pointer to the Ethernet device structure.
5947  * @param[out] cnt_free
5948  *   Where to store the pointer to the new counter.
5949  * @param[in] age
5950  *   Whether the pool is for counters allocated for aging.
5951  *
5952  * @return
5953  *   The counter pool pointer and @p cnt_free is set on success,
5954  *   NULL otherwise and rte_errno is set.
5955  */
5956 static struct mlx5_flow_counter_pool *
5957 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
5958                              struct mlx5_flow_counter **cnt_free,
5959                              uint32_t age)
5960 {
5961         struct mlx5_priv *priv = dev->data->dev_private;
5962         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5963         struct mlx5_flow_counter_pool *pool;
5964         struct mlx5_counters tmp_tq;
5965         struct mlx5_devx_obj *dcs = NULL;
5966         struct mlx5_flow_counter *cnt;
5967         enum mlx5_counter_type cnt_type =
5968                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5969         bool fallback = priv->sh->cmng.counter_fallback;
5970         uint32_t i;
5971
5972         if (fallback) {
5973                 /* bulk_bitmap must be 0 for single counter allocation. */
5974                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
5975                 if (!dcs)
5976                         return NULL;
5977                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
5978                 if (!pool) {
5979                         pool = flow_dv_pool_create(dev, dcs, age);
5980                         if (!pool) {
5981                                 mlx5_devx_cmd_destroy(dcs);
5982                                 return NULL;
5983                         }
5984                 }
5985                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
5986                 cnt = MLX5_POOL_GET_CNT(pool, i);
5987                 cnt->pool = pool;
5988                 cnt->dcs_when_free = dcs;
5989                 *cnt_free = cnt;
5990                 return pool;
5991         }
5992         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
5993         if (!dcs) {
5994                 rte_errno = ENODATA;
5995                 return NULL;
5996         }
5997         pool = flow_dv_pool_create(dev, dcs, age);
5998         if (!pool) {
5999                 mlx5_devx_cmd_destroy(dcs);
6000                 return NULL;
6001         }
6002         TAILQ_INIT(&tmp_tq);
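        /* Counter 0 of the new pool is handed back to the caller; counters
         * 1..MLX5_COUNTERS_PER_POOL-1 are pushed to the per-type free list
         * in one batch under the list spinlock. */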
6003         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
6004                 cnt = MLX5_POOL_GET_CNT(pool, i);
6005                 cnt->pool = pool;
6006                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
6007         }
6008         rte_spinlock_lock(&cmng->csl[cnt_type]);
6009         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
6010         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6011         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
6012         (*cnt_free)->pool = pool;
6013         return pool;
6014 }
6015
6016 /**
6017  * Allocate a flow counter.
6018  *
6019  * @param[in] dev
6020  *   Pointer to the Ethernet device structure.
6021  * @param[in] age
6022  *   Whether the counter was allocated for aging.
6023  *
6024  * @return
6025  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6026  */
6027 static uint32_t
6028 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
6029 {
6030         struct mlx5_priv *priv = dev->data->dev_private;
6031         struct mlx5_flow_counter_pool *pool = NULL;
6032         struct mlx5_flow_counter *cnt_free = NULL;
6033         bool fallback = priv->sh->cmng.counter_fallback;
6034         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
6035         enum mlx5_counter_type cnt_type =
6036                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
6037         uint32_t cnt_idx;
6038
6039         if (!priv->config.devx) {
6040                 rte_errno = ENOTSUP;
6041                 return 0;
6042         }
6043         /* Get free counters from container. */
6044         rte_spinlock_lock(&cmng->csl[cnt_type]);
6045         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
6046         if (cnt_free)
6047                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
6048         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6049         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
6050                 goto err;
6051         pool = cnt_free->pool;
6052         if (fallback)
6053                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
6054         /* Create a DV counter action only on first-time usage. */
6055         if (!cnt_free->action) {
6056                 uint16_t offset;
6057                 struct mlx5_devx_obj *dcs;
6058                 int ret;
6059
6060                 if (!fallback) {
6061                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
6062                         dcs = pool->min_dcs;
6063                 } else {
6064                         offset = 0;
6065                         dcs = cnt_free->dcs_when_free;
6066                 }
6067                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
6068                                                             &cnt_free->action);
6069                 if (ret) {
6070                         rte_errno = errno;
6071                         goto err;
6072                 }
6073         }
6074         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
6075                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
6076         /* Update the counter reset values. */
6077         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
6078                                  &cnt_free->bytes))
6079                 goto err;
6080         if (!fallback && !priv->sh->cmng.query_thread_on)
6081                 /* Start the asynchronous batch query by the host thread. */
6082                 mlx5_set_query_alarm(priv->sh);
6083         /*
6084          * When the count action is not shared by ID, the shared_info field
6085          * is used for the indirect action API's reference count.
6086          * When the counter action is shared neither by ID nor by the
6087          * indirect action API, the reference count must be 1.
6088          */
6089         cnt_free->shared_info.refcnt = 1;
6090         return cnt_idx;
6091 err:
6092         if (cnt_free) {
6093                 cnt_free->pool = pool;
6094                 if (fallback)
6095                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
6096                 rte_spinlock_lock(&cmng->csl[cnt_type]);
6097                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
6098                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
6099         }
6100         return 0;
6101 }
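
     /*
      * Usage sketch for the allocator above (illustrative, not part of the
      * driver): index 0 means failure with rte_errno set, and a successful
      * index must eventually be returned via flow_dv_counter_free():
      *
      *         uint32_t idx = flow_dv_counter_alloc(dev, 0);
      *
      *         if (!idx)
      *                 return -rte_errno;
      *         ... use the counter in a flow rule ...
      *         flow_dv_counter_free(dev, idx);
      */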
6102
6103 /**
6104  * Allocate a shared flow counter.
6105  *
6106  * @param[in] ctx
6107  *   Pointer to the shared counter configuration.
6108  * @param[out] data
6109  *   Where to save the allocated counter index.
6110  *
6111  * @return
6112  *   0 on success with the counter index stored in @p data, otherwise a
6113  *   negative errno value and rte_errno is set.
6114  */
6115 static int32_t
6116 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
6117 {
6118         struct mlx5_shared_counter_conf *conf = ctx;
6119         struct rte_eth_dev *dev = conf->dev;
6120         struct mlx5_flow_counter *cnt;
6121
6122         data->dword = flow_dv_counter_alloc(dev, 0);
             /* Do not fake a shared entry when the allocation failed. */
             if (!data->dword)
                     return -rte_errno;
6123         data->dword |= MLX5_CNT_SHARED_OFFSET;
6124         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
6125         cnt->shared_info.id = conf->id;
6126         return 0;
6127 }
6128
6129 /**
6130  * Get a shared flow counter.
6131  *
6132  * @param[in] dev
6133  *   Pointer to the Ethernet device structure.
6134  * @param[in] id
6135  *   Counter identifier.
6136  *
6137  * @return
6138  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
6139  */
6140 static uint32_t
6141 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
6142 {
6143         struct mlx5_priv *priv = dev->data->dev_private;
6144         struct mlx5_shared_counter_conf conf = {
6145                 .dev = dev,
6146                 .id = id,
6147         };
6148         union mlx5_l3t_data data = {
6149                 .dword = 0,
6150         };
6151
6152         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
6153                                flow_dv_counter_alloc_shared_cb, &conf);
6154         return data.dword;
6155 }
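
     /*
      * Note on the lookup above (hedged summary): mlx5_l3t_prepare_entry()
      * either returns the existing entry for @id with a reference taken, or
      * invokes flow_dv_counter_alloc_shared_cb() to create it. Two requests
      * with the same ID therefore resolve to the same counter index:
      *
      *         idx1 = flow_dv_counter_get_shared(dev, 42);
      *         idx2 = flow_dv_counter_get_shared(dev, 42);
      *         // idx1 == idx2 - one counter, two references
      */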
6156
6157 /**
6158  * Get age param from counter index.
6159  *
6160  * @param[in] dev
6161  *   Pointer to the Ethernet device structure.
6162  * @param[in] counter
6163  *   Index to the counter handler.
6164  *
6165  * @return
6166  *   The aging parameter specified for the counter index.
6167  */
6168 static struct mlx5_age_param*
6169 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
6170                                 uint32_t counter)
6171 {
6172         struct mlx5_flow_counter *cnt;
6173         struct mlx5_flow_counter_pool *pool = NULL;
6174
6175         flow_dv_counter_get_by_idx(dev, counter, &pool);
6176         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
6177         cnt = MLX5_POOL_GET_CNT(pool, counter);
6178         return MLX5_CNT_TO_AGE(cnt);
6179 }
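
     /*
      * Worked example of the index arithmetic above (illustrative): counter
      * indices are 1-based with MLX5_COUNTERS_PER_POOL slots per pool, so,
      * assuming MLX5_COUNTERS_PER_POOL == 512, index 515 decomposes into
      * pool (515 - 1) / 512 == 1 and in-pool offset (515 - 1) % 512 == 2.
      */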
6180
6181 /**
6182  * Remove a flow counter from aged counter list.
6183  *
6184  * @param[in] dev
6185  *   Pointer to the Ethernet device structure.
6186  * @param[in] counter
6187  *   Index to the counter handler.
6188  * @param[in] cnt
6189  *   Pointer to the counter handler.
6190  */
6191 static void
6192 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
6193                                 uint32_t counter, struct mlx5_flow_counter *cnt)
6194 {
6195         struct mlx5_age_info *age_info;
6196         struct mlx5_age_param *age_param;
6197         struct mlx5_priv *priv = dev->data->dev_private;
6198         uint16_t expected = AGE_CANDIDATE;
6199
6200         age_info = GET_PORT_AGE_INFO(priv);
6201         age_param = flow_dv_counter_idx_get_age(dev, counter);
6202         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
6203                                          AGE_FREE, false, __ATOMIC_RELAXED,
6204                                          __ATOMIC_RELAXED)) {
6205                 /*
6206                  * The lock is needed even on age timeout,
6207                  * since the counter may still be in use.
6208                  */
6209                 rte_spinlock_lock(&age_info->aged_sl);
6210                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
6211                 rte_spinlock_unlock(&age_info->aged_sl);
6212                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
6213         }
6214 }
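
     /*
      * Hedged summary of the atomics above: age_param->state acts as a small
      * state machine. The fast path flips AGE_CANDIDATE -> AGE_FREE with a
      * single compare-and-swap; if the state already left AGE_CANDIDATE (the
      * age timed out), the counter sits on the aged_counters list and must be
      * unlinked under aged_sl before its state is forced to AGE_FREE.
      */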
6215
6216 /**
6217  * Release a flow counter.
6218  *
6219  * @param[in] dev
6220  *   Pointer to the Ethernet device structure.
6221  * @param[in] counter
6222  *   Index to the counter handler.
6223  */
6224 static void
6225 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
6226 {
6227         struct mlx5_priv *priv = dev->data->dev_private;
6228         struct mlx5_flow_counter_pool *pool = NULL;
6229         struct mlx5_flow_counter *cnt;
6230         enum mlx5_counter_type cnt_type;
6231
6232         if (!counter)
6233                 return;
6234         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
6235         MLX5_ASSERT(pool);
6236         if (pool->is_aged) {
6237                 flow_dv_counter_remove_from_age(dev, counter, cnt);
6238         } else {
6239                 /*
6240                  * If the counter action is shared by ID, the l3t_clear_entry
6241                  * function decrements its reference count. If the action is
6242                  * still referenced after the decrement, the function returns
6243                  * here and does not release it.
6244                  */
6245                 if (IS_LEGACY_SHARED_CNT(counter) &&
6246                     mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
6247                                          cnt->shared_info.id))
6248                         return;
6249                 /*
6250                  * If the counter action is shared by the indirect action API,
6251                  * the atomic operation decrements its reference count. If the
6252                  * action is still referenced after the decrement, the function
6253                  * returns here and does not release it.
6254                  * When the counter action is shared neither by ID nor by the
6255                  * indirect action API, the reference count is 1 before the
6256                  * decrement, so the condition fails and the counter is freed.
6257                  */
6258                 if (!IS_LEGACY_SHARED_CNT(counter) &&
6259                     __atomic_sub_fetch(&cnt->shared_info.refcnt, 1,
6260                                        __ATOMIC_RELAXED))
6261                         return;
6262         }
6263         cnt->pool = pool;
6264         /*
6265          * Put the counter back to a list to be updated, in non-fallback mode.
6266          * Two lists are used alternately: while one is being queried, freed
6267          * counters are added to the other one, selected by the pool query_gen
6268          * value. After the query finishes, the queried list is appended to
6269          * the global container counter list. The lists are switched when a
6270          * query starts, so the query callback and this release function
6271          * always operate on different lists and need no lock between them.
6272          */
6273         if (!priv->sh->cmng.counter_fallback) {
6274                 rte_spinlock_lock(&pool->csl);
6275                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
6276                 rte_spinlock_unlock(&pool->csl);
6277         } else {
6278                 cnt->dcs_when_free = cnt->dcs_when_active;
6279                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6280                                            MLX5_COUNTER_TYPE_ORIGIN;
6281                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
6282                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
6283                                   cnt, next);
6284                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
6285         }
6286 }
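
     /*
      * A minimal sketch of the two-list scheme described above (assuming the
      * query generation toggles between 0 and 1):
      *
      *         release: TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], ...);
      *         query:   drains pool->counters[!pool->query_gen] and, once the
      *                  batch query completes, moves it to the global free list.
      *
      * Since the two sides always touch different lists, only the short
      * pool->csl spinlock is needed on the release path.
      */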
6287
6288 /**
6289  * Resize a meter id container.
6290  *
6291  * @param[in] dev
6292  *   Pointer to the Ethernet device structure.
6293  *
6294  * @return
6295  *   0 on success, otherwise negative errno value and rte_errno is set.
6296  */
6297 static int
6298 flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
6299 {
6300         struct mlx5_priv *priv = dev->data->dev_private;
6301         struct mlx5_aso_mtr_pools_mng *pools_mng =
6302                                 &priv->sh->mtrmng->pools_mng;
6303         void *old_pools = pools_mng->pools;
6304         uint32_t resize = pools_mng->n + MLX5_MTRS_CONTAINER_RESIZE;
6305         uint32_t mem_size = sizeof(struct mlx5_aso_mtr_pool *) * resize;
6306         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
6307
6308         if (!pools) {
6309                 rte_errno = ENOMEM;
6310                 return -ENOMEM;
6311         }
6312         if (!pools_mng->n &&
6313             mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
6314                 mlx5_free(pools);
6315                 return -ENOMEM;
6316         }
6317         if (old_pools)
6318                 memcpy(pools, old_pools, pools_mng->n *
6319                                        sizeof(struct mlx5_aso_mtr_pool *));
6320         pools_mng->n = resize;
6321         pools_mng->pools = pools;
6322         if (old_pools)
6323                 mlx5_free(old_pools);
6324         return 0;
6325 }
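
     /*
      * The resize above is a plain grow-and-copy: the pointer array grows by
      * MLX5_MTRS_CONTAINER_RESIZE entries per call and the old entries are
      * copied over, so previously returned pool indices stay valid. E.g.,
      * assuming a step of 64, the capacity evolves 0 -> 64 -> 128 as pools
      * are added.
      */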
6326
6327 /**
6328  * Prepare a new meter and/or a new meter pool.
6329  *
6330  * @param[in] dev
6331  *   Pointer to the Ethernet device structure.
6332  * @param[out] mtr_free
6333  *   Where to put the pointer of a new meter.
6334  *
6335  * @return
6336  *   The meter pool pointer on success (with @p mtr_free set),
6337  *   NULL otherwise and rte_errno is set.
6338  */
6339 static struct mlx5_aso_mtr_pool *
6340 flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
6341                              struct mlx5_aso_mtr **mtr_free)
6342 {
6343         struct mlx5_priv *priv = dev->data->dev_private;
6344         struct mlx5_aso_mtr_pools_mng *pools_mng =
6345                                 &priv->sh->mtrmng->pools_mng;
6346         struct mlx5_aso_mtr_pool *pool = NULL;
6347         struct mlx5_devx_obj *dcs = NULL;
6348         uint32_t i;
6349         uint32_t log_obj_size;
6350
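             /*
              * Hedged note: the halving below presumably reflects that each
              * ASO flow meter object hosts two meters, so the DevX object
              * count is half the per-pool meter count.
              */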
6351         log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
6352         dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
6353                         priv->sh->pdn, log_obj_size);
6354         if (!dcs) {
6355                 rte_errno = ENODATA;
6356                 return NULL;
6357         }
6358         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6359         if (!pool) {
6360                 rte_errno = ENOMEM;
6361                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6362                 return NULL;
6363         }
6364         pool->devx_obj = dcs;
6365         pool->index = pools_mng->n_valid;
6366         if (pool->index == pools_mng->n && flow_dv_mtr_container_resize(dev)) {
6367                 mlx5_free(pool);
6368                 claim_zero(mlx5_devx_cmd_destroy(dcs));
6369                 return NULL;
6370         }
6371         pools_mng->pools[pool->index] = pool;
6372         pools_mng->n_valid++;
6373         for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
6374                 pool->mtrs[i].offset = i;
6375                 LIST_INSERT_HEAD(&pools_mng->meters,
6376                                                 &pool->mtrs[i], next);
6377         }
6378         pool->mtrs[0].offset = 0;
6379         *mtr_free = &pool->mtrs[0];
6380         return pool;
6381 }
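
     /*
      * Illustrative index math (assumption: MLX5_MAKE_MTR_IDX packs the pool
      * index and the in-pool offset the same 1-based way the counter index
      * does, so that 0 can mean "no meter"):
      *
      *         mtr_idx = pool->index * MLX5_ASO_MTRS_PER_POOL + offset + 1;
      */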
6382
6383 /**
6384  * Release a flow meter into pool.
6385  *
6386  * @param[in] dev
6387  *   Pointer to the Ethernet device structure.
6388  * @param[in] mtr_idx
6389  *   Index to the ASO flow meter.
6390  */
6391 static void
6392 flow_dv_aso_mtr_release_to_pool(struct rte_eth_dev *dev, uint32_t mtr_idx)
6393 {
6394         struct mlx5_priv *priv = dev->data->dev_private;
6395         struct mlx5_aso_mtr_pools_mng *pools_mng =
6396                                 &priv->sh->mtrmng->pools_mng;
6397         struct mlx5_aso_mtr *aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_idx);
6398
6399         MLX5_ASSERT(aso_mtr);
6400         rte_spinlock_lock(&pools_mng->mtrsl);
6401         memset(&aso_mtr->fm, 0, sizeof(struct mlx5_flow_meter_info));
6402         aso_mtr->state = ASO_METER_FREE;
6403         LIST_INSERT_HEAD(&pools_mng->meters, aso_mtr, next);
6404         rte_spinlock_unlock(&pools_mng->mtrsl);
6405 }
6406
6407 /**
6408  * Allocate an ASO flow meter.
6409  *
6410  * @param[in] dev
6411  *   Pointer to the Ethernet device structure.
6412  *
6413  * @return
6414  *   Index to the ASO flow meter on success, 0 otherwise and rte_errno is set.
6415  */
6416 static uint32_t
6417 flow_dv_mtr_alloc(struct rte_eth_dev *dev)
6418 {
6419         struct mlx5_priv *priv = dev->data->dev_private;
6420         struct mlx5_aso_mtr *mtr_free = NULL;
6421         struct mlx5_aso_mtr_pools_mng *pools_mng =
6422                                 &priv->sh->mtrmng->pools_mng;
6423         struct mlx5_aso_mtr_pool *pool;
6424         uint32_t mtr_idx = 0;
6425
6426         if (!priv->config.devx) {
6427                 rte_errno = ENOTSUP;
6428                 return 0;
6429         }
6430         /* Get a free meter from management; create a new pool if empty. */
6432         rte_spinlock_lock(&pools_mng->mtrsl);
6433         mtr_free = LIST_FIRST(&pools_mng->meters);
6434         if (mtr_free)
6435                 LIST_REMOVE(mtr_free, next);
6436         if (!mtr_free && !flow_dv_mtr_pool_create(dev, &mtr_free)) {
6437                 rte_spinlock_unlock(&pools_mng->mtrsl);
6438                 return 0;
6439         }
6440         mtr_free->state = ASO_METER_WAIT;
6441         rte_spinlock_unlock(&pools_mng->mtrsl);
6442         pool = container_of(mtr_free,
6443                         struct mlx5_aso_mtr_pool,
6444                         mtrs[mtr_free->offset]);
6445         mtr_idx = MLX5_MAKE_MTR_IDX(pool->index, mtr_free->offset);
6446         if (!mtr_free->fm.meter_action) {
6447 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
6448                 struct rte_flow_error error;
6449                 int reg_id;
6450
6451                 reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &error);
                     if (reg_id < 0) {
                             /* No color register available for the ASO meter. */
                             flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
                             return 0;
                     }
6452                 mtr_free->fm.meter_action =
6453                         mlx5_glue->dv_create_flow_action_aso
6454                                                 (priv->sh->rx_domain,
6455                                                  pool->devx_obj->obj,
6456                                                  mtr_free->offset,
6457                                                  (1 << MLX5_FLOW_COLOR_GREEN),
6458                                                  reg_id - REG_C_0);
6459 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
6460                 if (!mtr_free->fm.meter_action) {
6461                         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
6462                         return 0;
6463                 }
6464         }
6465         return mtr_idx;
6466 }
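
     /*
      * Usage sketch (illustrative, not part of the driver): flow_dv_mtr_alloc()
      * hands out a 1-based meter index and the matching release path is
      * flow_dv_aso_mtr_release_to_pool():
      *
      *         uint32_t mtr_idx = flow_dv_mtr_alloc(dev);
      *
      *         if (!mtr_idx)
      *                 return 0; // rte_errno already set
      *         ... program the meter via the ASO queue ...
      *         flow_dv_aso_mtr_release_to_pool(dev, mtr_idx);
      */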
6467
6468 /**
6469  * Verify the @p attributes will be correctly understood by the NIC and store
6470  * them in the @p flow if everything is correct.
6471  *
6472  * @param[in] dev
6473  *   Pointer to dev struct.
6474  * @param[in] tunnel
6475  *   Pointer to the tunnel offload context, or NULL if none.
6476  * @param[in] attributes
6477  *   Pointer to flow attributes.
6478  * @param[in] grp_info
6479  *   Pointer to the flow group translation information.
6478  * @param[out] error
6479  *   Pointer to error structure.
6480  *
6481  * @return
6482  *   - 0 on success and a non-root table.
6483  *   - 1 on success and a root table.
6484  *   - a negative errno value otherwise and rte_errno is set.
6485  */
6486 static int
6487 flow_dv_validate_attributes(struct rte_eth_dev *dev,
6488                             const struct mlx5_flow_tunnel *tunnel,
6489                             const struct rte_flow_attr *attributes,
6490                             const struct flow_grp_info *grp_info,
6491                             struct rte_flow_error *error)
6492 {
6493         struct mlx5_priv *priv = dev->data->dev_private;
6494         uint32_t lowest_priority = mlx5_get_lowest_priority(dev, attributes);
6495         int ret = 0;
6496
6497 #ifndef HAVE_MLX5DV_DR
6498         RTE_SET_USED(tunnel);
6499         RTE_SET_USED(grp_info);
6500         if (attributes->group)
6501                 return rte_flow_error_set(error, ENOTSUP,
6502                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6503                                           NULL,
6504                                           "groups are not supported");
6505 #else
6506         uint32_t table = 0;
6507
6508         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
6509                                        grp_info, error);
6510         if (ret)
6511                 return ret;
6512         if (!table)
6513                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
6514 #endif
6515         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
6516             attributes->priority > lowest_priority)
6517                 return rte_flow_error_set(error, ENOTSUP,
6518                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
6519                                           NULL,
6520                                           "priority out of range");
6521         if (attributes->transfer) {
6522                 if (!priv->config.dv_esw_en)
6523                         return rte_flow_error_set
6524                                 (error, ENOTSUP,
6525                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6526                                  "E-Switch dr is not supported");
6527                 if (!(priv->representor || priv->master))
6528                         return rte_flow_error_set
6529                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6530                                  NULL, "E-Switch configuration can only be"
6531                                  " done by a master or a representor device");
6532                 if (attributes->egress)
6533                         return rte_flow_error_set
6534                                 (error, ENOTSUP,
6535                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
6536                                  "egress is not supported");
6537         }
6538         if (!(attributes->egress ^ attributes->ingress))
6539                 return rte_flow_error_set(error, ENOTSUP,
6540                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6541                                           "must specify exactly one of "
6542                                           "ingress or egress");
6543         return ret;
6544 }
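
     /*
      * Example of the direction checks above (illustrative): exactly one of
      * ingress/egress must be set, and transfer rules additionally reject
      * egress, so an attribute set like the following passes the direction
      * check:
      *
      *         struct rte_flow_attr attr = {
      *                 .group = 1,
      *                 .priority = 0,
      *                 .ingress = 1,   // ingress XOR egress must hold
      *         };
      */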
6545
6546 static uint16_t
6547 mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
6548                           const struct rte_flow_item *end)
6549 {
6550         const struct rte_flow_item *item = *head;
6551         uint16_t l3_protocol;
6552
6553         for (; item != end; item++) {
6554                 switch (item->type) {
6555                 default:
6556                         break;
6557                 case RTE_FLOW_ITEM_TYPE_IPV4:
6558                         l3_protocol = RTE_ETHER_TYPE_IPV4;
6559                         goto l3_ok;
6560                 case RTE_FLOW_ITEM_TYPE_IPV6:
6561                         l3_protocol = RTE_ETHER_TYPE_IPV6;
6562                         goto l3_ok;
6563                 case RTE_FLOW_ITEM_TYPE_ETH:
6564                         if (item->mask && item->spec) {
6565                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
6566                                                             type, item,
6567                                                             l3_protocol);
6568                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6569                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6570                                         goto l3_ok;
6571                         }
6572                         break;
6573                 case RTE_FLOW_ITEM_TYPE_VLAN:
6574                         if (item->mask && item->spec) {
6575                                 MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
6576                                                             inner_type, item,
6577                                                             l3_protocol);
6578                                 if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
6579                                     l3_protocol == RTE_ETHER_TYPE_IPV6)
6580                                         goto l3_ok;
6581                         }
6582                         break;
6583                 }
6584         }
6585         return 0;
6586 l3_ok:
6587         *head = item;
6588         return l3_protocol;
6589 }
6590
6591 static uint8_t
6592 mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
6593                           const struct rte_flow_item *end)
6594 {
6595         const struct rte_flow_item *item = *head;
6596         uint8_t l4_protocol;
6597
6598         for (; item != end; item++) {
6599                 switch (item->type) {
6600                 default:
6601                         break;
6602                 case RTE_FLOW_ITEM_TYPE_TCP:
6603                         l4_protocol = IPPROTO_TCP;
6604                         goto l4_ok;
6605                 case RTE_FLOW_ITEM_TYPE_UDP:
6606                         l4_protocol = IPPROTO_UDP;
6607                         goto l4_ok;
6608                 case RTE_FLOW_ITEM_TYPE_IPV4:
6609                         if (item->mask && item->spec) {
6610                                 const struct rte_flow_item_ipv4 *mask, *spec;
6611
6612                                 mask = (typeof(mask))item->mask;
6613                                 spec = (typeof(spec))item->spec;
6614                                 l4_protocol = mask->hdr.next_proto_id &
6615                                               spec->hdr.next_proto_id;
6616                                 if (l4_protocol == IPPROTO_TCP ||
6617                                     l4_protocol == IPPROTO_UDP)
6618                                         goto l4_ok;
6619                         }
6620                         break;
6621                 case RTE_FLOW_ITEM_TYPE_IPV6:
6622                         if (item->mask && item->spec) {
6623                                 const struct rte_flow_item_ipv6 *mask, *spec;
6624                                 mask = (typeof(mask))item->mask;
6625                                 spec = (typeof(spec))item->spec;
6626                                 l4_protocol = mask->hdr.proto & spec->hdr.proto;
6627                                 if (l4_protocol == IPPROTO_TCP ||
6628                                     l4_protocol == IPPROTO_UDP)
6629                                         goto l4_ok;
6630                         }
6631                         break;
6632                 }
6633         }
6634         return 0;
6635 l4_ok:
6636         *head = item;
6637         return l4_protocol;
6638 }
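
     /*
      * Both locate helpers above trust only header fields that are both set
      * and matched, e.g. l4_protocol = mask->hdr.next_proto_id &
      * spec->hdr.next_proto_id, so a TCP spec under a zero mask yields no
      * protocol. Illustrative call pattern, with *head advanced to the item
      * that provided the protocol:
      *
      *         const struct rte_flow_item *it = pattern;
      *         uint16_t l3 = mlx5_flow_locate_proto_l3(&it, end);
      *         uint8_t l4 = l3 ? mlx5_flow_locate_proto_l4(&it, end) : 0;
      */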
6639
6640 static int
6641 flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
6642                                 const struct rte_flow_item *rule_items,
6643                                 const struct rte_flow_item *integrity_item,
6644                                 struct rte_flow_error *error)
6645 {
6646         struct mlx5_priv *priv = dev->data->dev_private;
6647         const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
6648         const struct rte_flow_item_integrity *mask = (typeof(mask))
6649                                                      integrity_item->mask;
6650         const struct rte_flow_item_integrity *spec = (typeof(spec))
6651                                                      integrity_item->spec;
6652         uint32_t protocol;
6653
6654         if (!priv->config.hca_attr.pkt_integrity_match)
6655                 return rte_flow_error_set(error, ENOTSUP,
6656                                           RTE_FLOW_ERROR_TYPE_ITEM,
6657                                           integrity_item,
6658                                           "packet integrity match is not supported");
6659         if (!mask)
6660                 mask = &rte_flow_item_integrity_mask;
6661         if (!mlx5_validate_integrity_item(mask))
6662                 return rte_flow_error_set(error, ENOTSUP,
6663                                           RTE_FLOW_ERROR_TYPE_ITEM,
6664                                           integrity_item,
6665                                           "unsupported integrity filter");
6666         tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
             /* The level field is read below; an integrity item needs a spec. */
             if (!spec)
                     return rte_flow_error_set(error, EINVAL,
                                               RTE_FLOW_ERROR_TYPE_ITEM,
                                               integrity_item,
                                               "integrity item spec is required");
6667         if (spec->level > 1) {
6668                 if (!tunnel_item)
6669                         return rte_flow_error_set(error, ENOTSUP,
6670                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6671                                                   integrity_item,
6672                                                   "missing tunnel item");
6673                 item = tunnel_item;
6674                 end_item = mlx5_find_end_item(tunnel_item);
6675         } else {
6676                 end_item = tunnel_item ? tunnel_item :
6677                            mlx5_find_end_item(integrity_item);
6678         }
6679         if (mask->l3_ok || mask->ipv4_csum_ok) {
6680                 protocol = mlx5_flow_locate_proto_l3(&item, end_item);
6681                 if (!protocol)
6682                         return rte_flow_error_set(error, EINVAL,
6683                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6684                                                   integrity_item,
6685                                                   "missing L3 protocol");
6686         }
6687         if (mask->l4_ok || mask->l4_csum_ok) {
6688                 protocol = mlx5_flow_locate_proto_l4(&item, end_item);
6689                 if (!protocol)
6690                         return rte_flow_error_set(error, EINVAL,
6691                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6692                                                   integrity_item,
6693                                                   "missing L4 protocol");
6694         }
6695         return 0;
6696 }
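
     /*
      * Example patterns for the checks above (illustrative): l3_ok/l4_ok need
      * the corresponding protocol in scope, so
      *
      *         eth / ipv4 / tcp / integrity (mask.l3_ok = 1, mask.l4_csum_ok = 1)
      *
      * validates, while the same integrity item without any L3/L4 items fails
      * with "missing L3 protocol" or "missing L4 protocol".
      */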
6697
6698 /**
6699  * Internal validation function for validating both actions and items.
6700  *
6701  * @param[in] dev
6702  *   Pointer to the rte_eth_dev structure.
6703  * @param[in] attr
6704  *   Pointer to the flow attributes.
6705  * @param[in] items
6706  *   Pointer to the list of items.
6707  * @param[in] actions
6708  *   Pointer to the list of actions.
6709  * @param[in] external
6710  *   The flow rule is created by a request external to the PMD.
6711  * @param[in] hairpin
6712  *   Number of hairpin TX actions, 0 means classic flow.
6713  * @param[out] error
6714  *   Pointer to the error structure.
6715  *
6716  * @return
6717  *   0 on success, a negative errno value otherwise and rte_errno is set.
6718  */
6719 static int
6720 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
6721                  const struct rte_flow_item items[],
6722                  const struct rte_flow_action actions[],
6723                  bool external, int hairpin, struct rte_flow_error *error)
6724 {
6725         int ret;
6726         uint64_t action_flags = 0;
6727         uint64_t item_flags = 0;
6728         uint64_t last_item = 0;
6729         uint8_t next_protocol = 0xff;
6730         uint16_t ether_type = 0;
6731         int actions_n = 0;
6732         uint8_t item_ipv6_proto = 0;
6733         int fdb_mirror_limit = 0;
6734         int modify_after_mirror = 0;
6735         const struct rte_flow_item *geneve_item = NULL;
6736         const struct rte_flow_item *gre_item = NULL;
6737         const struct rte_flow_item *gtp_item = NULL;
6738         const struct rte_flow_action_raw_decap *decap;
6739         const struct rte_flow_action_raw_encap *encap;
6740         const struct rte_flow_action_rss *rss = NULL;
6741         const struct rte_flow_action_rss *sample_rss = NULL;
6742         const struct rte_flow_action_count *sample_count = NULL;
6743         const struct rte_flow_item_tcp nic_tcp_mask = {
6744                 .hdr = {
6745                         .tcp_flags = 0xFF,
6746                         .src_port = RTE_BE16(UINT16_MAX),
6747                         .dst_port = RTE_BE16(UINT16_MAX),
6748                 }
6749         };
6750         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
6751                 .hdr = {
6752                         .src_addr =
6753                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6754                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6755                         .dst_addr =
6756                         "\xff\xff\xff\xff\xff\xff\xff\xff"
6757                         "\xff\xff\xff\xff\xff\xff\xff\xff",
6758                         .vtc_flow = RTE_BE32(0xffffffff),
6759                         .proto = 0xff,
6760                         .hop_limits = 0xff,
6761                 },
6762                 .has_frag_ext = 1,
6763         };
6764         const struct rte_flow_item_ecpri nic_ecpri_mask = {
6765                 .hdr = {
6766                         .common = {
6767                                 .u32 =
6768                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
6769                                         .type = 0xFF,
6770                                         }).u32),
6771                         },
6772                         .dummy[0] = 0xffffffff,
6773                 },
6774         };
6775         struct mlx5_priv *priv = dev->data->dev_private;
6776         struct mlx5_dev_config *dev_conf = &priv->config;
6777         uint16_t queue_index = 0xFFFF;
6778         const struct rte_flow_item_vlan *vlan_m = NULL;
6779         uint32_t rw_act_num = 0;
6780         uint64_t is_root;
6781         const struct mlx5_flow_tunnel *tunnel;
6782         enum mlx5_tof_rule_type tof_rule_type;
6783         struct flow_grp_info grp_info = {
6784                 .external = !!external,
6785                 .transfer = !!attr->transfer,
6786                 .fdb_def_rule = !!priv->fdb_def_rule,
6787                 .std_tbl_fix = true,
6788         };
6789         const struct rte_eth_hairpin_conf *conf;
6790         const struct rte_flow_item *rule_items = items;
6791         const struct rte_flow_item *port_id_item = NULL;
6792         bool def_policy = false;
6793
6794         if (items == NULL)
6795                 return rte_flow_error_set(error, EINVAL,
                                               RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                               NULL, "empty items array");
6796         tunnel = is_tunnel_offload_active(dev) ?
6797                  mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
6798         if (tunnel) {
6799                 if (priv->representor)
6800                         return rte_flow_error_set
6801                                 (error, ENOTSUP,
6802                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6803                                  NULL, "decap not supported for VF representor");
6804                 if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
6805                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6806                 else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
6807                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
6808                                         MLX5_FLOW_ACTION_DECAP;
6809                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
6810                                         (dev, attr, tunnel, tof_rule_type);
6811         }
6812         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
6813         if (ret < 0)
6814                 return ret;
6815         is_root = (uint64_t)ret;
6816         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6817                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6818                 int type = items->type;
6819
6820                 if (!mlx5_flow_os_item_supported(type))
6821                         return rte_flow_error_set(error, ENOTSUP,
6822                                                   RTE_FLOW_ERROR_TYPE_ITEM,
6823                                                   NULL, "item not supported");
6824                 switch (type) {
6825                 case RTE_FLOW_ITEM_TYPE_VOID:
6826                         break;
6827                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6828                         ret = flow_dv_validate_item_port_id
6829                                         (dev, items, attr, item_flags, error);
6830                         if (ret < 0)
6831                                 return ret;
6832                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6833                         port_id_item = items;
6834                         break;
6835                 case RTE_FLOW_ITEM_TYPE_ETH:
6836                         ret = mlx5_flow_validate_item_eth(items, item_flags,
6837                                                           true, error);
6838                         if (ret < 0)
6839                                 return ret;
6840                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6841                                              MLX5_FLOW_LAYER_OUTER_L2;
6842                         if (items->mask != NULL && items->spec != NULL) {
6843                                 ether_type =
6844                                         ((const struct rte_flow_item_eth *)
6845                                          items->spec)->type;
6846                                 ether_type &=
6847                                         ((const struct rte_flow_item_eth *)
6848                                          items->mask)->type;
6849                                 ether_type = rte_be_to_cpu_16(ether_type);
6850                         } else {
6851                                 ether_type = 0;
6852                         }
6853                         break;
6854                 case RTE_FLOW_ITEM_TYPE_VLAN:
6855                         ret = flow_dv_validate_item_vlan(items, item_flags,
6856                                                          dev, error);
6857                         if (ret < 0)
6858                                 return ret;
6859                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
6860                                              MLX5_FLOW_LAYER_OUTER_VLAN;
6861                         if (items->mask != NULL && items->spec != NULL) {
6862                                 ether_type =
6863                                         ((const struct rte_flow_item_vlan *)
6864                                          items->spec)->inner_type;
6865                                 ether_type &=
6866                                         ((const struct rte_flow_item_vlan *)
6867                                          items->mask)->inner_type;
6868                                 ether_type = rte_be_to_cpu_16(ether_type);
6869                         } else {
6870                                 ether_type = 0;
6871                         }
6872                         /* Store outer VLAN mask for of_push_vlan action. */
6873                         if (!tunnel)
6874                                 vlan_m = items->mask;
6875                         break;
6876                 case RTE_FLOW_ITEM_TYPE_IPV4:
6877                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6878                                                   &item_flags, &tunnel);
6879                         ret = flow_dv_validate_item_ipv4(items, item_flags,
6880                                                          last_item, ether_type,
6881                                                          error);
6882                         if (ret < 0)
6883                                 return ret;
6884                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6885                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6886                         if (items->mask != NULL &&
6887                             ((const struct rte_flow_item_ipv4 *)
6888                              items->mask)->hdr.next_proto_id) {
6889                                 next_protocol =
6890                                         ((const struct rte_flow_item_ipv4 *)
6891                                          (items->spec))->hdr.next_proto_id;
6892                                 next_protocol &=
6893                                         ((const struct rte_flow_item_ipv4 *)
6894                                          (items->mask))->hdr.next_proto_id;
6895                         } else {
6896                                 /* Reset for inner layer. */
6897                                 next_protocol = 0xff;
6898                         }
6899                         break;
6900                 case RTE_FLOW_ITEM_TYPE_IPV6:
6901                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6902                                                   &item_flags, &tunnel);
6903                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
6904                                                            last_item,
6905                                                            ether_type,
6906                                                            &nic_ipv6_mask,
6907                                                            error);
6908                         if (ret < 0)
6909                                 return ret;
6910                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6911                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6912                         if (items->mask != NULL &&
6913                             ((const struct rte_flow_item_ipv6 *)
6914                              items->mask)->hdr.proto) {
6915                                 item_ipv6_proto =
6916                                         ((const struct rte_flow_item_ipv6 *)
6917                                          items->spec)->hdr.proto;
6918                                 next_protocol =
6919                                         ((const struct rte_flow_item_ipv6 *)
6920                                          items->spec)->hdr.proto;
6921                                 next_protocol &=
6922                                         ((const struct rte_flow_item_ipv6 *)
6923                                          items->mask)->hdr.proto;
6924                         } else {
6925                                 /* Reset for inner layer. */
6926                                 next_protocol = 0xff;
6927                         }
6928                         break;
6929                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
6930                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
6931                                                                   item_flags,
6932                                                                   error);
6933                         if (ret < 0)
6934                                 return ret;
6935                         last_item = tunnel ?
6936                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
6937                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
6938                         if (items->mask != NULL &&
6939                             ((const struct rte_flow_item_ipv6_frag_ext *)
6940                              items->mask)->hdr.next_header) {
6941                                 next_protocol =
6942                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6943                                  items->spec)->hdr.next_header;
6944                                 next_protocol &=
6945                                 ((const struct rte_flow_item_ipv6_frag_ext *)
6946                                  items->mask)->hdr.next_header;
6947                         } else {
6948                                 /* Reset for inner layer. */
6949                                 next_protocol = 0xff;
6950                         }
6951                         break;
6952                 case RTE_FLOW_ITEM_TYPE_TCP:
6953                         ret = mlx5_flow_validate_item_tcp
6954                                                 (items, item_flags,
6955                                                  next_protocol,
6956                                                  &nic_tcp_mask,
6957                                                  error);
6958                         if (ret < 0)
6959                                 return ret;
6960                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6961                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6962                         break;
6963                 case RTE_FLOW_ITEM_TYPE_UDP:
6964                         ret = mlx5_flow_validate_item_udp(items, item_flags,
6965                                                           next_protocol,
6966                                                           error);
6967                         if (ret < 0)
6968                                 return ret;
6969                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6970                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6971                         break;
6972                 case RTE_FLOW_ITEM_TYPE_GRE:
6973                         ret = mlx5_flow_validate_item_gre(items, item_flags,
6974                                                           next_protocol, error);
6975                         if (ret < 0)
6976                                 return ret;
6977                         gre_item = items;
6978                         last_item = MLX5_FLOW_LAYER_GRE;
6979                         break;
6980                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6981                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
6982                                                             next_protocol,
6983                                                             error);
6984                         if (ret < 0)
6985                                 return ret;
6986                         last_item = MLX5_FLOW_LAYER_NVGRE;
6987                         break;
6988                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6989                         ret = mlx5_flow_validate_item_gre_key
6990                                 (items, item_flags, gre_item, error);
6991                         if (ret < 0)
6992                                 return ret;
6993                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6994                         break;
6995                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6996                         ret = mlx5_flow_validate_item_vxlan(dev, items,
6997                                                             item_flags, attr,
6998                                                             error);
6999                         if (ret < 0)
7000                                 return ret;
7001                         last_item = MLX5_FLOW_LAYER_VXLAN;
7002                         break;
7003                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7004                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
7005                                                                 item_flags, dev,
7006                                                                 error);
7007                         if (ret < 0)
7008                                 return ret;
7009                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7010                         break;
7011                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7012                         ret = mlx5_flow_validate_item_geneve(items,
7013                                                              item_flags, dev,
7014                                                              error);
7015                         if (ret < 0)
7016                                 return ret;
7017                         geneve_item = items;
7018                         last_item = MLX5_FLOW_LAYER_GENEVE;
7019                         break;
7020                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7021                         ret = mlx5_flow_validate_item_geneve_opt(items,
7022                                                                  last_item,
7023                                                                  geneve_item,
7024                                                                  dev,
7025                                                                  error);
7026                         if (ret < 0)
7027                                 return ret;
7028                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
7029                         break;
7030                 case RTE_FLOW_ITEM_TYPE_MPLS:
7031                         ret = mlx5_flow_validate_item_mpls(dev, items,
7032                                                            item_flags,
7033                                                            last_item, error);
7034                         if (ret < 0)
7035                                 return ret;
7036                         last_item = MLX5_FLOW_LAYER_MPLS;
7037                         break;
7038
7039                 case RTE_FLOW_ITEM_TYPE_MARK:
7040                         ret = flow_dv_validate_item_mark(dev, items, attr,
7041                                                          error);
7042                         if (ret < 0)
7043                                 return ret;
7044                         last_item = MLX5_FLOW_ITEM_MARK;
7045                         break;
7046                 case RTE_FLOW_ITEM_TYPE_META:
7047                         ret = flow_dv_validate_item_meta(dev, items, attr,
7048                                                          error);
7049                         if (ret < 0)
7050                                 return ret;
7051                         last_item = MLX5_FLOW_ITEM_METADATA;
7052                         break;
7053                 case RTE_FLOW_ITEM_TYPE_ICMP:
7054                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
7055                                                            next_protocol,
7056                                                            error);
7057                         if (ret < 0)
7058                                 return ret;
7059                         last_item = MLX5_FLOW_LAYER_ICMP;
7060                         break;
7061                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7062                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
7063                                                             next_protocol,
7064                                                             error);
7065                         if (ret < 0)
7066                                 return ret;
7067                         item_ipv6_proto = IPPROTO_ICMPV6;
7068                         last_item = MLX5_FLOW_LAYER_ICMP6;
7069                         break;
7070                 case RTE_FLOW_ITEM_TYPE_TAG:
7071                         ret = flow_dv_validate_item_tag(dev, items,
7072                                                         attr, error);
7073                         if (ret < 0)
7074                                 return ret;
7075                         last_item = MLX5_FLOW_ITEM_TAG;
7076                         break;
7077                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7078                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7079                         break;
7080                 case RTE_FLOW_ITEM_TYPE_GTP:
7081                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
7082                                                         error);
7083                         if (ret < 0)
7084                                 return ret;
7085                         gtp_item = items;
7086                         last_item = MLX5_FLOW_LAYER_GTP;
7087                         break;
7088                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7089                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
7090                                                             gtp_item, attr,
7091                                                             error);
7092                         if (ret < 0)
7093                                 return ret;
7094                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
7095                         break;
7096                 case RTE_FLOW_ITEM_TYPE_ECPRI:
7097                         /* Capacity will be checked in the translate stage. */
7098                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
7099                                                             last_item,
7100                                                             ether_type,
7101                                                             &nic_ecpri_mask,
7102                                                             error);
7103                         if (ret < 0)
7104                                 return ret;
7105                         last_item = MLX5_FLOW_LAYER_ECPRI;
7106                         break;
7107                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7108                         if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
7109                                 return rte_flow_error_set
7110                                         (error, ENOTSUP,
7111                                          RTE_FLOW_ERROR_TYPE_ITEM,
7112                                          NULL, "multiple integrity items not supported");
7113                         ret = flow_dv_validate_item_integrity(dev, rule_items,
7114                                                               items, error);
7115                         if (ret < 0)
7116                                 return ret;
7117                         last_item = MLX5_FLOW_ITEM_INTEGRITY;
7118                         break;
7119                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7120                         ret = flow_dv_validate_item_aso_ct(dev, items,
7121                                                            &item_flags, error);
7122                         if (ret < 0)
7123                                 return ret;
7124                         break;
7125                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
7126                         /* The tunnel offload item was processed before;
7127                          * list it here only as a supported item type.
7128                          */
7129                         break;
7130                 default:
7131                         return rte_flow_error_set(error, ENOTSUP,
7132                                                   RTE_FLOW_ERROR_TYPE_ITEM,
7133                                                   NULL, "item not supported");
7134                 }
7135                 item_flags |= last_item;
7136         }
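             /*
              * Summary note (illustrative): at this point item_flags,
              * ether_type and next_protocol describe the validated pattern;
              * the action loop below reuses them, e.g. to pair decap actions
              * with the matched tunnel layers.
              */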
7137         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
7138                 int type = actions->type;
7139                 bool shared_count = false;
7140
7141                 if (!mlx5_flow_os_action_supported(type))
7142                         return rte_flow_error_set(error, ENOTSUP,
7143                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7144                                                   actions,
7145                                                   "action not supported");
7146                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
7147                         return rte_flow_error_set(error, ENOTSUP,
7148                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7149                                                   actions, "too many actions");
7150                 if (action_flags &
7151                         MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
7152                         return rte_flow_error_set(error, ENOTSUP,
7153                                 RTE_FLOW_ERROR_TYPE_ACTION,
7154                                 NULL, "meter action with policy "
7155                                 "must be the last action");
7156                 switch (type) {
7157                 case RTE_FLOW_ACTION_TYPE_VOID:
7158                         break;
7159                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7160                         ret = flow_dv_validate_action_port_id(dev,
7161                                                               action_flags,
7162                                                               actions,
7163                                                               attr,
7164                                                               error);
7165                         if (ret)
7166                                 return ret;
7167                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7168                         ++actions_n;
7169                         break;
7170                 case RTE_FLOW_ACTION_TYPE_FLAG:
7171                         ret = flow_dv_validate_action_flag(dev, action_flags,
7172                                                            attr, error);
7173                         if (ret < 0)
7174                                 return ret;
7175                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7176                                 /* Count all modify-header actions as one. */
7177                                 if (!(action_flags &
7178                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7179                                         ++actions_n;
7180                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
7181                                                 MLX5_FLOW_ACTION_MARK_EXT;
7182                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7183                                         modify_after_mirror = 1;
7184
7185                         } else {
7186                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
7187                                 ++actions_n;
7188                         }
7189                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7190                         break;
7191                 case RTE_FLOW_ACTION_TYPE_MARK:
7192                         ret = flow_dv_validate_action_mark(dev, actions,
7193                                                            action_flags,
7194                                                            attr, error);
7195                         if (ret < 0)
7196                                 return ret;
7197                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7198                                 /* Count all modify-header actions as one. */
7199                                 if (!(action_flags &
7200                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
7201                                         ++actions_n;
7202                                 action_flags |= MLX5_FLOW_ACTION_MARK |
7203                                                 MLX5_FLOW_ACTION_MARK_EXT;
7204                                 if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7205                                         modify_after_mirror = 1;
7206                         } else {
7207                                 action_flags |= MLX5_FLOW_ACTION_MARK;
7208                                 ++actions_n;
7209                         }
7210                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
7211                         break;
7212                 case RTE_FLOW_ACTION_TYPE_SET_META:
7213                         ret = flow_dv_validate_action_set_meta(dev, actions,
7214                                                                action_flags,
7215                                                                attr, error);
7216                         if (ret < 0)
7217                                 return ret;
7218                         /* Count all modify-header actions as one action. */
7219                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7220                                 ++actions_n;
7221                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7222                                 modify_after_mirror = 1;
7223                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7224                         rw_act_num += MLX5_ACT_NUM_SET_META;
7225                         break;
7226                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7227                         ret = flow_dv_validate_action_set_tag(dev, actions,
7228                                                               action_flags,
7229                                                               attr, error);
7230                         if (ret < 0)
7231                                 return ret;
7232                         /* Count all modify-header actions as one action. */
7233                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7234                                 ++actions_n;
7235                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7236                                 modify_after_mirror = 1;
7237                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7238                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7239                         break;
7240                 case RTE_FLOW_ACTION_TYPE_DROP:
7241                         ret = mlx5_flow_validate_action_drop(action_flags,
7242                                                              attr, error);
7243                         if (ret < 0)
7244                                 return ret;
7245                         action_flags |= MLX5_FLOW_ACTION_DROP;
7246                         ++actions_n;
7247                         break;
7248                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7249                         ret = mlx5_flow_validate_action_queue(actions,
7250                                                               action_flags, dev,
7251                                                               attr, error);
7252                         if (ret < 0)
7253                                 return ret;
7254                         queue_index = ((const struct rte_flow_action_queue *)
7255                                                         (actions->conf))->index;
7256                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7257                         ++actions_n;
7258                         break;
7259                 case RTE_FLOW_ACTION_TYPE_RSS:
7260                         rss = actions->conf;
7261                         ret = mlx5_flow_validate_action_rss(actions,
7262                                                             action_flags, dev,
7263                                                             attr, item_flags,
7264                                                             error);
7265                         if (ret < 0)
7266                                 return ret;
7267                         if (rss && sample_rss &&
7268                             (sample_rss->level != rss->level ||
7269                             sample_rss->types != rss->types))
7270                                 return rte_flow_error_set(error, ENOTSUP,
7271                                         RTE_FLOW_ERROR_TYPE_ACTION,
7272                                         NULL,
7273                                         "Can't use different RSS types "
7274                                         "or levels in the same flow");
7275                         if (rss != NULL && rss->queue_num)
7276                                 queue_index = rss->queue[0];
7277                         action_flags |= MLX5_FLOW_ACTION_RSS;
7278                         ++actions_n;
7279                         break;
7280                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7281                         ret =
7282                         mlx5_flow_validate_action_default_miss(action_flags,
7283                                         attr, error);
7284                         if (ret < 0)
7285                                 return ret;
7286                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7287                         ++actions_n;
7288                         break;
7289                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
7290                 case RTE_FLOW_ACTION_TYPE_COUNT:
7291                         shared_count = is_shared_action_count(actions);
7292                         ret = flow_dv_validate_action_count(dev, shared_count,
7293                                                             action_flags,
7294                                                             error);
7295                         if (ret < 0)
7296                                 return ret;
7297                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7298                         ++actions_n;
7299                         break;
7300                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7301                         if (flow_dv_validate_action_pop_vlan(dev,
7302                                                              action_flags,
7303                                                              actions,
7304                                                              item_flags, attr,
7305                                                              error))
7306                                 return -rte_errno;
7307                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7308                                 modify_after_mirror = 1;
7309                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7310                         ++actions_n;
7311                         break;
7312                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7313                         ret = flow_dv_validate_action_push_vlan(dev,
7314                                                                 action_flags,
7315                                                                 vlan_m,
7316                                                                 actions, attr,
7317                                                                 error);
7318                         if (ret < 0)
7319                                 return ret;
7320                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7321                                 modify_after_mirror = 1;
7322                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7323                         ++actions_n;
7324                         break;
7325                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7326                         ret = flow_dv_validate_action_set_vlan_pcp
7327                                                 (action_flags, actions, error);
7328                         if (ret < 0)
7329                                 return ret;
7330                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7331                                 modify_after_mirror = 1;
7332                         /* PCP is counted together with the push_vlan action. */
7333                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
7334                         break;
7335                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7336                         ret = flow_dv_validate_action_set_vlan_vid
7337                                                 (item_flags, action_flags,
7338                                                  actions, error);
7339                         if (ret < 0)
7340                                 return ret;
7341                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7342                                 modify_after_mirror = 1;
7343                         /* VID is counted together with the push_vlan action. */
7344                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7345                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
7346                         break;
7347                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7348                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7349                         ret = flow_dv_validate_action_l2_encap(dev,
7350                                                                action_flags,
7351                                                                actions, attr,
7352                                                                error);
7353                         if (ret < 0)
7354                                 return ret;
7355                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7356                         ++actions_n;
7357                         break;
7358                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7359                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7360                         ret = flow_dv_validate_action_decap(dev, action_flags,
7361                                                             actions, item_flags,
7362                                                             attr, error);
7363                         if (ret < 0)
7364                                 return ret;
7365                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7366                                 modify_after_mirror = 1;
7367                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7368                         ++actions_n;
7369                         break;
7370                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7371                         ret = flow_dv_validate_action_raw_encap_decap
7372                                 (dev, NULL, actions->conf, attr, &action_flags,
7373                                  &actions_n, actions, item_flags, error);
7374                         if (ret < 0)
7375                                 return ret;
7376                         break;
7377                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7378                         decap = actions->conf;
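                        /*
                         * Peek past any VOID actions: a RAW_DECAP followed
                         * directly by a RAW_ENCAP is validated as one
                         * combined decap/encap pair, so both confs are
                         * passed down together below.
                         */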
7379                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
7380                                 ;
7381                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7382                                 encap = NULL;
7383                                 actions--;
7384                         } else {
7385                                 encap = actions->conf;
7386                         }
7387                         ret = flow_dv_validate_action_raw_encap_decap
7388                                            (dev,
7389                                             decap ? decap : &empty_decap, encap,
7390                                             attr, &action_flags, &actions_n,
7391                                             actions, item_flags, error);
7392                         if (ret < 0)
7393                                 return ret;
7394                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7395                             (action_flags & MLX5_FLOW_ACTION_DECAP))
7396                                 modify_after_mirror = 1;
7397                         break;
7398                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7399                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7400                         ret = flow_dv_validate_action_modify_mac(action_flags,
7401                                                                  actions,
7402                                                                  item_flags,
7403                                                                  error);
7404                         if (ret < 0)
7405                                 return ret;
7406                         /* Count all modify-header actions as one action. */
7407                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7408                                 ++actions_n;
7409                         action_flags |= actions->type ==
7410                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7411                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
7412                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
7413                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7414                                 modify_after_mirror = 1;
7415                         /*
7416                          * Even though the source and destination MAC addresses
7417                          * overlap in the header on a 4B-aligned boundary, the
7418                          * convert function handles them separately, so 4 SW
7419                          * actions are created in total: 2 actions are added for
7420                          * each address, no matter how many of its bytes are set.
7421                          */
7422                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
7423                         break;
7424                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7425                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7426                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
7427                                                                   actions,
7428                                                                   item_flags,
7429                                                                   error);
7430                         if (ret < 0)
7431                                 return ret;
7432                         /* Count all modify-header actions as one action. */
7433                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7434                                 ++actions_n;
7435                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7436                                 modify_after_mirror = 1;
7437                         action_flags |= actions->type ==
7438                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7439                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
7440                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
7441                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
7442                         break;
7443                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7444                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7445                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
7446                                                                   actions,
7447                                                                   item_flags,
7448                                                                   error);
7449                         if (ret < 0)
7450                                 return ret;
7451                         if (item_ipv6_proto == IPPROTO_ICMPV6)
7452                                 return rte_flow_error_set(error, ENOTSUP,
7453                                         RTE_FLOW_ERROR_TYPE_ACTION,
7454                                         actions,
7455                                         "Can't change header "
7456                                         "with ICMPv6 proto");
7457                         /* Count all modify-header actions as one action. */
7458                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7459                                 ++actions_n;
7460                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7461                                 modify_after_mirror = 1;
7462                         action_flags |= actions->type ==
7463                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7464                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
7465                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
7466                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
7467                         break;
7468                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7469                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7470                         ret = flow_dv_validate_action_modify_tp(action_flags,
7471                                                                 actions,
7472                                                                 item_flags,
7473                                                                 error);
7474                         if (ret < 0)
7475                                 return ret;
7476                         /* Count all modify-header actions as one action. */
7477                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7478                                 ++actions_n;
7479                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7480                                 modify_after_mirror = 1;
7481                         action_flags |= actions->type ==
7482                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7483                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
7484                                                 MLX5_FLOW_ACTION_SET_TP_DST;
7485                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
7486                         break;
7487                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7488                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7489                         ret = flow_dv_validate_action_modify_ttl(action_flags,
7490                                                                  actions,
7491                                                                  item_flags,
7492                                                                  error);
7493                         if (ret < 0)
7494                                 return ret;
7495                         /* Count all modify-header actions as one action. */
7496                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7497                                 ++actions_n;
7498                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7499                                 modify_after_mirror = 1;
7500                         action_flags |= actions->type ==
7501                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
7502                                                 MLX5_FLOW_ACTION_SET_TTL :
7503                                                 MLX5_FLOW_ACTION_DEC_TTL;
7504                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
7505                         break;
7506                 case RTE_FLOW_ACTION_TYPE_JUMP:
7507                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
7508                                                            action_flags,
7509                                                            attr, external,
7510                                                            error);
7511                         if (ret)
7512                                 return ret;
7513                         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
7514                             fdb_mirror_limit)
7515                                 return rte_flow_error_set(error, EINVAL,
7516                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7517                                                   NULL,
7518                                                   "sample and jump action combination is not supported");
7519                         ++actions_n;
7520                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7521                         break;
7522                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7523                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7524                         ret = flow_dv_validate_action_modify_tcp_seq
7525                                                                 (action_flags,
7526                                                                  actions,
7527                                                                  item_flags,
7528                                                                  error);
7529                         if (ret < 0)
7530                                 return ret;
7531                         /* Count all modify-header actions as one action. */
7532                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7533                                 ++actions_n;
7534                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7535                                 modify_after_mirror = 1;
7536                         action_flags |= actions->type ==
7537                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7538                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
7539                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7540                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
7541                         break;
7542                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7543                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7544                         ret = flow_dv_validate_action_modify_tcp_ack
7545                                                                 (action_flags,
7546                                                                  actions,
7547                                                                  item_flags,
7548                                                                  error);
7549                         if (ret < 0)
7550                                 return ret;
7551                         /* Count all modify-header actions as one action. */
7552                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7553                                 ++actions_n;
7554                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7555                                 modify_after_mirror = 1;
7556                         action_flags |= actions->type ==
7557                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7558                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
7559                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
7560                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
7561                         break;
7562                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
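                        /*
                         * PMD-internal action inserted by the flow splitting
                         * logic itself, so there is nothing to validate here.
                         */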
7563                         break;
7564                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7565                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7566                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7567                         break;
7568                 case RTE_FLOW_ACTION_TYPE_METER:
7569                         ret = mlx5_flow_validate_action_meter(dev,
7570                                                               action_flags,
7571                                                               actions, attr,
7572                                                               port_id_item,
7573                                                               &def_policy,
7574                                                               error);
7575                         if (ret < 0)
7576                                 return ret;
7577                         action_flags |= MLX5_FLOW_ACTION_METER;
7578                         if (!def_policy)
7579                                 action_flags |=
7580                                 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
7581                         ++actions_n;
7582                         /* Meter action will add one more TAG action. */
7583                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
7584                         break;
7585                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
7586                         if (!attr->transfer && !attr->group)
7587                                 return rte_flow_error_set(error, ENOTSUP,
7588                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7589                                                 NULL,
7590                                                 "Shared ASO age action is not supported for group 0");
7591                         if (action_flags & MLX5_FLOW_ACTION_AGE)
7592                                 return rte_flow_error_set
7593                                                   (error, EINVAL,
7594                                                    RTE_FLOW_ERROR_TYPE_ACTION,
7595                                                    NULL,
7596                                                    "duplicate age actions set");
7597                         action_flags |= MLX5_FLOW_ACTION_AGE;
7598                         ++actions_n;
7599                         break;
7600                 case RTE_FLOW_ACTION_TYPE_AGE:
7601                         ret = flow_dv_validate_action_age(action_flags,
7602                                                           actions, dev,
7603                                                           error);
7604                         if (ret < 0)
7605                                 return ret;
7606                         /*
7607                          * Validate that the regular AGE action (using a counter)
7608                          * is mutually exclusive with shared counter actions.
7609                          */
7610                         if (!priv->sh->flow_hit_aso_en) {
7611                                 if (shared_count)
7612                                         return rte_flow_error_set
7613                                                 (error, EINVAL,
7614                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7615                                                 NULL,
7616                                                 "old age and shared count combination is not supported");
7617                                 if (sample_count)
7618                                         return rte_flow_error_set
7619                                                 (error, EINVAL,
7620                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7621                                                 NULL,
7622                                                 "old age action and count must be in the same sub flow");
7623                         }
7624                         action_flags |= MLX5_FLOW_ACTION_AGE;
7625                         ++actions_n;
7626                         break;
7627                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7628                         ret = flow_dv_validate_action_modify_ipv4_dscp
7629                                                          (action_flags,
7630                                                           actions,
7631                                                           item_flags,
7632                                                           error);
7633                         if (ret < 0)
7634                                 return ret;
7635                         /* Count all modify-header actions as one action. */
7636                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7637                                 ++actions_n;
7638                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7639                                 modify_after_mirror = 1;
7640                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7641                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7642                         break;
7643                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7644                         ret = flow_dv_validate_action_modify_ipv6_dscp
7645                                                                 (action_flags,
7646                                                                  actions,
7647                                                                  item_flags,
7648                                                                  error);
7649                         if (ret < 0)
7650                                 return ret;
7651                         /* Count all modify-header actions as one action. */
7652                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7653                                 ++actions_n;
7654                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7655                                 modify_after_mirror = 1;
7656                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7657                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
7658                         break;
7659                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
7660                         ret = flow_dv_validate_action_sample(&action_flags,
7661                                                              actions, dev,
7662                                                              attr, item_flags,
7663                                                              rss, &sample_rss,
7664                                                              &sample_count,
7665                                                              &fdb_mirror_limit,
7666                                                              error);
7667                         if (ret < 0)
7668                                 return ret;
7669                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
7670                         ++actions_n;
7671                         break;
7672                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7673                         ret = flow_dv_validate_action_modify_field(dev,
7674                                                                    action_flags,
7675                                                                    actions,
7676                                                                    attr,
7677                                                                    error);
7678                         if (ret < 0)
7679                                 return ret;
7680                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
7681                                 modify_after_mirror = 1;
7682                         /* Count all modify-header actions as one action. */
7683                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
7684                                 ++actions_n;
7685                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7686                         rw_act_num += ret;
7687                         break;
7688                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7689                         ret = flow_dv_validate_action_aso_ct(dev, action_flags,
7690                                                              item_flags, attr,
7691                                                              error);
7692                         if (ret < 0)
7693                                 return ret;
7694                         action_flags |= MLX5_FLOW_ACTION_CT;
7695                         break;
7696                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
7697                         /* The tunnel offload action was processed earlier;
7698                          * list it here as a supported type.
7699                          */
7700                         break;
7701                 default:
7702                         return rte_flow_error_set(error, ENOTSUP,
7703                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7704                                                   actions,
7705                                                   "action not supported");
7706                 }
7707         }
7708         /*
7709          * Validate actions in tunnel offload flow rules:
7710          * - Explicit decap action is prohibited by the tunnel offload API.
7711          * - Drop action in a tunnel steer rule is prohibited by the API.
7712          * - Application cannot use MARK action because its value can mask
7713          *   the tunnel default miss notification.
7714          * - JUMP in a tunnel match rule is not supported by the current
7715          *   PMD implementation.
7716          * - TAG & META are reserved for future use.
7717          */
7718         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
7719                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
7720                                             MLX5_FLOW_ACTION_MARK     |
7721                                             MLX5_FLOW_ACTION_SET_TAG  |
7722                                             MLX5_FLOW_ACTION_SET_META |
7723                                             MLX5_FLOW_ACTION_DROP;
7724
7725                 if (action_flags & bad_actions_mask)
7726                         return rte_flow_error_set
7727                                         (error, EINVAL,
7728                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7729                                         "Invalid RTE action in tunnel "
7730                                         "set decap rule");
7731                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
7732                         return rte_flow_error_set
7733                                         (error, EINVAL,
7734                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7735                                         "tunnel set decap rule must terminate "
7736                                         "with JUMP");
7737                 if (!attr->ingress)
7738                         return rte_flow_error_set
7739                                         (error, EINVAL,
7740                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7741                                         "tunnel flows for ingress traffic only");
7742         }
7743         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
7744                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
7745                                             MLX5_FLOW_ACTION_MARK    |
7746                                             MLX5_FLOW_ACTION_SET_TAG |
7747                                             MLX5_FLOW_ACTION_SET_META;
7748
7749                 if (action_flags & bad_actions_mask)
7750                         return rte_flow_error_set
7751                                         (error, EINVAL,
7752                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7753                                         "Invalid RTE action in tunnel "
7754                                         "set match rule");
7755         }
7756         /*
7757          * Validate the drop action mutual exclusion with other actions.
7758          * Drop action is mutually-exclusive with any other action, except for
7759          * Count action.
7760          * Drop action compatibility with tunnel offload was already validated.
7761          */
7762         if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH |
7763                             MLX5_FLOW_ACTION_TUNNEL_SET));
7764         else if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
7765             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
7766                 return rte_flow_error_set(error, EINVAL,
7767                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7768                                           "Drop action is mutually-exclusive "
7769                                           "with any other action, except for "
7770                                           "Count action");
7771         /* E-Switch imposes a few restrictions on items and actions. */
7772         if (attr->transfer) {
7773                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7774                     action_flags & MLX5_FLOW_ACTION_FLAG)
7775                         return rte_flow_error_set(error, ENOTSUP,
7776                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7777                                                   NULL,
7778                                                   "unsupported action FLAG");
7779                 if (!mlx5_flow_ext_mreg_supported(dev) &&
7780                     action_flags & MLX5_FLOW_ACTION_MARK)
7781                         return rte_flow_error_set(error, ENOTSUP,
7782                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7783                                                   NULL,
7784                                                   "unsupported action MARK");
7785                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
7786                         return rte_flow_error_set(error, ENOTSUP,
7787                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7788                                                   NULL,
7789                                                   "unsupported action QUEUE");
7790                 if (action_flags & MLX5_FLOW_ACTION_RSS)
7791                         return rte_flow_error_set(error, ENOTSUP,
7792                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7793                                                   NULL,
7794                                                   "unsupported action RSS");
7795                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
7796                         return rte_flow_error_set(error, EINVAL,
7797                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7798                                                   actions,
7799                                                   "no fate action is found");
7800         } else {
7801                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
7802                         return rte_flow_error_set(error, EINVAL,
7803                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7804                                                   actions,
7805                                                   "no fate action is found");
7806         }
7807         /*
7808          * Continue validation for Xcap and VLAN actions.
7809          * If hairpin is working in explicit TX rule mode, there is no action
7810          * splitting and the validation of a hairpin ingress flow is the
7811          * same as for other standard flows.
7812          */
7813         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
7814                              MLX5_FLOW_VLAN_ACTIONS)) &&
7815             (queue_index == 0xFFFF ||
7816              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
7817              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
7818              conf->tx_explicit != 0))) {
7819                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
7820                     MLX5_FLOW_XCAP_ACTIONS)
7821                         return rte_flow_error_set(error, ENOTSUP,
7822                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7823                                                   NULL, "encap and decap "
7824                                                   "combination isn't supported");
7825                 if (!attr->transfer && attr->ingress) {
7826                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7827                                 return rte_flow_error_set
7828                                                 (error, ENOTSUP,
7829                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7830                                                  NULL, "encap is not supported"
7831                                                  " for ingress traffic");
7832                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7833                                 return rte_flow_error_set
7834                                                 (error, ENOTSUP,
7835                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7836                                                  NULL, "push VLAN action not "
7837                                                  "supported for ingress");
7838                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
7839                                         MLX5_FLOW_VLAN_ACTIONS)
7840                                 return rte_flow_error_set
7841                                                 (error, ENOTSUP,
7842                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7843                                                  NULL, "no support for "
7844                                                  "multiple VLAN actions");
7845                 }
7846         }
7847         if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) {
7848                 if ((action_flags & (MLX5_FLOW_FATE_ACTIONS &
7849                         ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) &&
7850                         attr->ingress)
7851                         return rte_flow_error_set
7852                                 (error, ENOTSUP,
7853                                 RTE_FLOW_ERROR_TYPE_ACTION,
7854                                 NULL, "fate action not supported for "
7855                                 "meter with policy");
7856                 if (attr->egress) {
7857                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
7858                                 return rte_flow_error_set
7859                                         (error, ENOTSUP,
7860                                         RTE_FLOW_ERROR_TYPE_ACTION,
7861                                         NULL, "modify header action in egress "
7862                                         "cannot be done before meter action");
7863                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
7864                                 return rte_flow_error_set
7865                                         (error, ENOTSUP,
7866                                         RTE_FLOW_ERROR_TYPE_ACTION,
7867                                         NULL, "encap action in egress "
7868                                         "cannot be done before meter action");
7869                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7870                                 return rte_flow_error_set
7871                                         (error, ENOTSUP,
7872                                         RTE_FLOW_ERROR_TYPE_ACTION,
7873                                         NULL, "push vlan action in egress "
7874                                         "cannot be done before meter action");
7875                 }
7876         }
7877         /*
7878          * Hairpin flow will add one more TAG action in TX implicit mode.
7879          * In TX explicit mode, there will be no hairpin flow ID.
7880          */
7881         if (hairpin > 0)
7882                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7883         /* Extra metadata enabled: one more TAG action will be added. */
7884         if (dev_conf->dv_flow_en &&
7885             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
7886             mlx5_flow_ext_mreg_supported(dev))
7887                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
7888         if (rw_act_num >
7889                         flow_dv_modify_hdr_action_max(dev, is_root)) {
7890                 return rte_flow_error_set(error, ENOTSUP,
7891                                           RTE_FLOW_ERROR_TYPE_ACTION,
7892                                           NULL, "too many header modify"
7893                                           " actions to support");
7894         }
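        /*
         * Illustrative arithmetic, assuming MLX5_ACT_NUM_MDF_MAC == 2 and
         * MLX5_ACT_NUM_MDF_TTL == 1 as defined in mlx5_flow.h: set_mac_src +
         * set_mac_dst (2 + 2) plus set_ttl (1) on an implicit-mode hairpin
         * flow (+1 TAG) with extended metadata (+1 TAG) yields
         * rw_act_num == 7, which must fit the per-table budget checked above.
         */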
7895         /* E-Switch egress mirror combined with modify actions has a limitation on CX5. */
7896         if (fdb_mirror_limit && modify_after_mirror)
7897                 return rte_flow_error_set(error, EINVAL,
7898                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7899                                 "sample before modify action is not supported");
7900         return 0;
7901 }
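
/*
 * A minimal application-side sketch (not part of the driver) of a rule that
 * exercises the drop/count exception validated above: DROP may be combined
 * with COUNT and nothing else. Port 0 and the single-ETH pattern are
 * illustrative only.
 */
static int __rte_unused
flow_dv_validate_drop_count_sketch(void)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        /* Adding e.g. QUEUE next to DROP here would fail with EINVAL. */
        return rte_flow_validate(0, &attr, pattern, actions, &error);
}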
7902
7903 /**
7904  * Internal preparation function. Allocates the DV flow size;
7905  * this size is constant.
7906  *
7907  * @param[in] dev
7908  *   Pointer to the rte_eth_dev structure.
7909  * @param[in] attr
7910  *   Pointer to the flow attributes.
7911  * @param[in] items
7912  *   Pointer to the list of items.
7913  * @param[in] actions
7914  *   Pointer to the list of actions.
7915  * @param[out] error
7916  *   Pointer to the error structure.
7917  *
7918  * @return
7919  *   Pointer to mlx5_flow object on success,
7920  *   otherwise NULL and rte_errno is set.
7921  */
7922 static struct mlx5_flow *
7923 flow_dv_prepare(struct rte_eth_dev *dev,
7924                 const struct rte_flow_attr *attr __rte_unused,
7925                 const struct rte_flow_item items[] __rte_unused,
7926                 const struct rte_flow_action actions[] __rte_unused,
7927                 struct rte_flow_error *error)
7928 {
7929         uint32_t handle_idx = 0;
7930         struct mlx5_flow *dev_flow;
7931         struct mlx5_flow_handle *dev_handle;
7932         struct mlx5_priv *priv = dev->data->dev_private;
7933         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
7934
7935         MLX5_ASSERT(wks);
7936         wks->skip_matcher_reg = 0;
7937         wks->policy = NULL;
7938         wks->final_policy = NULL;
7939         /* Guard against overflowing the per-thread flow workspace array. */
7940         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
7941                 rte_flow_error_set(error, ENOSPC,
7942                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7943                                    "no free temporary device flow");
7944                 return NULL;
7945         }
7946         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7947                                    &handle_idx);
7948         if (!dev_handle) {
7949                 rte_flow_error_set(error, ENOMEM,
7950                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7951                                    "not enough memory to create flow handle");
7952                 return NULL;
7953         }
7954         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
7955         dev_flow = &wks->flows[wks->flow_idx++];
7956         memset(dev_flow, 0, sizeof(*dev_flow));
7957         dev_flow->handle = dev_handle;
7958         dev_flow->handle_idx = handle_idx;
7959         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
7960         dev_flow->ingress = attr->ingress;
7961         dev_flow->dv.transfer = attr->transfer;
7962         return dev_flow;
7963 }
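
/*
 * A minimal sketch of the indexed-pool pattern used above: the pool returns
 * a zeroed object together with a 32-bit index, and it is the index, not
 * the pointer, that the flow handle stores and later frees by. "pool"
 * stands for any of the priv->sh->ipool[] entries.
 */
static void __rte_unused
flow_dv_ipool_sketch(struct mlx5_indexed_pool *pool)
{
        uint32_t idx = 0;
        struct mlx5_flow_handle *dh;

        dh = mlx5_ipool_zmalloc(pool, &idx); /* object + index out-param */
        if (!dh)
                return; /* pool exhausted or out of memory */
        /* ... persist only "idx" in other structures ... */
        mlx5_ipool_free(pool, idx); /* release by index, not pointer */
}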
7964
7965 #ifdef RTE_LIBRTE_MLX5_DEBUG
7966 /**
7967  * Sanity check for match mask and value. Similar to check_valid_spec() in
7968  * the kernel driver. If an unmasked bit is present in the value, it fails.
7969  *
7970  * @param match_mask
7971  *   pointer to match mask buffer.
7972  * @param match_value
7973  *   pointer to match value buffer.
7974  *
7975  * @return
7976  *   0 if valid, -EINVAL otherwise.
7977  */
7978 static int
7979 flow_dv_check_valid_spec(void *match_mask, void *match_value)
7980 {
7981         uint8_t *m = match_mask;
7982         uint8_t *v = match_value;
7983         unsigned int i;
7984
7985         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
7986                 if (v[i] & ~m[i]) {
7987                         DRV_LOG(ERR,
7988                                 "match_value differs from match_criteria"
7989                                 " %p[%u] != %p[%u]",
7990                                 match_value, i, match_mask, i);
7991                         return -EINVAL;
7992                 }
7993         }
7994         return 0;
7995 }
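
/*
 * Worked example: a mask byte 0xf0 with a value byte 0x1f fails the check,
 * since 0x1f & ~0xf0 == 0x0f leaves value bits the matcher never compares.
 */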
7996 #endif
7997
7998 /**
7999  * Add match of ip_version.
8000  *
8001  * @param[in] group
8002  *   Flow group.
8003  * @param[in] headers_v
8004  *   Values header pointer.
8005  * @param[in] headers_m
8006  *   Masks header pointer.
8007  * @param[in] ip_version
8008  *   The IP version to set.
8009  */
8010 static inline void
8011 flow_dv_set_match_ip_version(uint32_t group,
8012                              void *headers_v,
8013                              void *headers_m,
8014                              uint8_t ip_version)
8015 {
8016         if (group == 0)
8017                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
8018         else
8019                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
8020                          ip_version);
8021         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
8022         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
8023         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
8024 }
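
/*
 * For example, flow_dv_set_match_ip_version(1, headers_v, headers_m, 4)
 * programs an ip_version mask/value of 0x4 for an IPv4 rule in a non-root
 * group and clears any ethertype match, since ip_version is used instead.
 */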
8025
8026 /**
8027  * Add Ethernet item to matcher and to the value.
8028  *
8029  * @param[in, out] matcher
8030  *   Flow matcher.
8031  * @param[in, out] key
8032  *   Flow matcher value.
8033  * @param[in] item
8034  *   Flow pattern to translate.
8035  * @param[in] inner
8036  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
8037  */
8038 static void
8039 flow_dv_translate_item_eth(void *matcher, void *key,
8040                            const struct rte_flow_item *item, int inner,
8041                            uint32_t group)
8042 {
8043         const struct rte_flow_item_eth *eth_m = item->mask;
8044         const struct rte_flow_item_eth *eth_v = item->spec;
8045         const struct rte_flow_item_eth nic_mask = {
8046                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8047                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
8048                 .type = RTE_BE16(0xffff),
8049                 .has_vlan = 0,
8050         };
8051         void *hdrs_m;
8052         void *hdrs_v;
8053         char *l24_v;
8054         unsigned int i;
8055
8056         if (!eth_v)
8057                 return;
8058         if (!eth_m)
8059                 eth_m = &nic_mask;
8060         if (inner) {
8061                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8062                                          inner_headers);
8063                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8064         } else {
8065                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8066                                          outer_headers);
8067                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8068         }
8069         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
8070                &eth_m->dst, sizeof(eth_m->dst));
8071         /* The value must be in the range of the mask. */
8072         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
8073         for (i = 0; i < sizeof(eth_m->dst); ++i)
8074                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
8075         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
8076                &eth_m->src, sizeof(eth_m->src));
8077         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
8078         /* The value must be in the range of the mask. */
8079         for (i = 0; i < sizeof(eth_m->src); ++i)
8080                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
8081         /*
8082          * HW supports match on one Ethertype, the Ethertype following the last
8083          * VLAN tag of the packet (see PRM).
8084          * Set match on ethertype only if ETH header is not followed by VLAN.
8085          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8086          * ethertype, and use ip_version field instead.
8087          * eCPRI over Ether layer will use type value 0xAEFE.
8088          */
8089         if (eth_m->type == 0xFFFF) {
8090                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
8091                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8092                 switch (eth_v->type) {
8093                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8094                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8095                         return;
8096                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
8097                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8098                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8099                         return;
8100                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8101                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8102                         return;
8103                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8104                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8105                         return;
8106                 default:
8107                         break;
8108                 }
8109         }
8110         if (eth_m->has_vlan) {
8111                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8112                 if (eth_v->has_vlan) {
8113                         /*
8114                          * Here, when also has_more_vlan field in VLAN item is
8115                          * not set, only single-tagged packets will be matched.
8116                          */
8117                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8118                         return;
8119                 }
8120         }
8121         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8122                  rte_be_to_cpu_16(eth_m->type));
8123         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
8124         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
8125 }
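
/*
 * A sketch (illustrative addresses, not driver code) of an application-side
 * item this translator consumes; the full-type mask combined with an IPv4
 * spec takes the ip_version path above instead of a raw ethertype match.
 */
static const struct rte_flow_item_eth flow_dv_eth_spec_sketch = {
        .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
        .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item_eth flow_dv_eth_mask_sketch = {
        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        .type = RTE_BE16(0xffff),
};
static const struct rte_flow_item flow_dv_eth_item_sketch __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_ETH,
        .spec = &flow_dv_eth_spec_sketch,
        .mask = &flow_dv_eth_mask_sketch,
};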
8126
8127 /**
8128  * Add VLAN item to matcher and to the value.
8129  *
8130  * @param[in, out] dev_flow
8131  *   Flow descriptor.
8132  * @param[in, out] matcher
8133  *   Flow matcher.
8134  * @param[in, out] key
8135  *   Flow matcher value.
8136  * @param[in] item
8137  *   Flow pattern to translate.
8138  * @param[in] inner
8139  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
8140  */
8141 static void
8142 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
8143                             void *matcher, void *key,
8144                             const struct rte_flow_item *item,
8145                             int inner, uint32_t group)
8146 {
8147         const struct rte_flow_item_vlan *vlan_m = item->mask;
8148         const struct rte_flow_item_vlan *vlan_v = item->spec;
8149         void *hdrs_m;
8150         void *hdrs_v;
8151         uint16_t tci_m;
8152         uint16_t tci_v;
8153
8154         if (inner) {
8155                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8156                                          inner_headers);
8157                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8158         } else {
8159                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
8160                                          outer_headers);
8161                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8162                 /*
8163                  * This is a workaround; masks are not supported here
8164                  * and were pre-validated.
8165                  */
8166                 if (vlan_v)
8167                         dev_flow->handle->vf_vlan.tag =
8168                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
8169         }
8170         /*
8171          * When VLAN item exists in flow, mark packet as tagged,
8172          * even if TCI is not specified.
8173          */
8174         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
8175                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
8176                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
8177         }
8178         if (!vlan_v)
8179                 return;
8180         if (!vlan_m)
8181                 vlan_m = &rte_flow_item_vlan_mask;
8182         tci_m = rte_be_to_cpu_16(vlan_m->tci);
8183         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
8184         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
8185         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
8186         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
8187         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
8188         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
8189         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
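        /*
         * Worked example (illustrative): TCI 0xE00A decomposes as
         * PCP = 0xE00A >> 13 = 7, CFI/DEI = (0xE00A >> 12) & 1 = 0 and
         * VID = 0xE00A & 0x0FFF = 0x00A; MLX5_SET() truncates each value
         * to its field width, so no explicit masking is needed above.
         */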
8190         /*
8191          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
8192          * ethertype, and use ip_version field instead.
8193          */
8194         if (vlan_m->inner_type == 0xFFFF) {
8195                 switch (vlan_v->inner_type) {
8196                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
8197                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8198                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8199                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8200                         return;
8201                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
8202                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
8203                         return;
8204                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
8205                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
8206                         return;
8207                 default:
8208                         break;
8209                 }
8210         }
8211         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
8212                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
8213                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
8214                 /* Only one vlan_tag bit can be set. */
8215                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
8216                 return;
8217         }
8218         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
8219                  rte_be_to_cpu_16(vlan_m->inner_type));
8220         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
8221                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
8222 }
8223
8224 /**
8225  * Add IPV4 item to matcher and to the value.
8226  *
8227  * @param[in, out] matcher
8228  *   Flow matcher.
8229  * @param[in, out] key
8230  *   Flow matcher value.
8231  * @param[in] item
8232  *   Flow pattern to translate.
8233  * @param[in] inner
8234  *   Item is inner pattern.
8235  * @param[in] group
8236  *   The group to insert the rule.
8237  */
8238 static void
8239 flow_dv_translate_item_ipv4(void *matcher, void *key,
8240                             const struct rte_flow_item *item,
8241                             int inner, uint32_t group)
8242 {
8243         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
8244         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
8245         const struct rte_flow_item_ipv4 nic_mask = {
8246                 .hdr = {
8247                         .src_addr = RTE_BE32(0xffffffff),
8248                         .dst_addr = RTE_BE32(0xffffffff),
8249                         .type_of_service = 0xff,
8250                         .next_proto_id = 0xff,
8251                         .time_to_live = 0xff,
8252                 },
8253         };
8254         void *headers_m;
8255         void *headers_v;
8256         char *l24_m;
8257         char *l24_v;
8258         uint8_t tos;
8259
8260         if (inner) {
8261                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8262                                          inner_headers);
8263                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8264         } else {
8265                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8266                                          outer_headers);
8267                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8268         }
8269         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
8270         if (!ipv4_v)
8271                 return;
8272         if (!ipv4_m)
8273                 ipv4_m = &nic_mask;
8274         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8275                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8276         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8277                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
8278         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
8279         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
8280         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8281                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8282         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8283                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
8284         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
8285         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
8286         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
8287         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
8288                  ipv4_m->hdr.type_of_service);
8289         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
8290         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
8291                  ipv4_m->hdr.type_of_service >> 2);
8292         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
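        /*
         * Worked example (illustrative): TOS 0xB9 yields
         * DSCP = 0xB9 >> 2 = 0x2E (46, Expedited Forwarding) and
         * ECN = 0xB9 & 0x3 = 0x1; MLX5_SET() masks each value to the
         * 6-bit ip_dscp and 2-bit ip_ecn field widths.
         */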
8293         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8294                  ipv4_m->hdr.next_proto_id);
8295         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8296                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
8297         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8298                  ipv4_m->hdr.time_to_live);
8299         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8300                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
8301         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8302                  !!(ipv4_m->hdr.fragment_offset));
8303         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8304                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
8305 }
8306
8307 /**
8308  * Add IPV6 item to matcher and to the value.
8309  *
8310  * @param[in, out] matcher
8311  *   Flow matcher.
8312  * @param[in, out] key
8313  *   Flow matcher value.
8314  * @param[in] item
8315  *   Flow pattern to translate.
8316  * @param[in] inner
8317  *   Item is inner pattern.
8318  * @param[in] group
8319  *   The group to insert the rule.
8320  */
8321 static void
8322 flow_dv_translate_item_ipv6(void *matcher, void *key,
8323                             const struct rte_flow_item *item,
8324                             int inner, uint32_t group)
8325 {
8326         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
8327         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
8328         const struct rte_flow_item_ipv6 nic_mask = {
8329                 .hdr = {
8330                         .src_addr =
8331                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8332                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8333                         .dst_addr =
8334                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
8335                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
8336                         .vtc_flow = RTE_BE32(0xffffffff),
8337                         .proto = 0xff,
8338                         .hop_limits = 0xff,
8339                 },
8340         };
8341         void *headers_m;
8342         void *headers_v;
8343         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8344         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8345         char *l24_m;
8346         char *l24_v;
8347         uint32_t vtc_m;
8348         uint32_t vtc_v;
8349         int i;
8350         int size;
8351
8352         if (inner) {
8353                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8354                                          inner_headers);
8355                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8356         } else {
8357                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8358                                          outer_headers);
8359                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8360         }
8361         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
8362         if (!ipv6_v)
8363                 return;
8364         if (!ipv6_m)
8365                 ipv6_m = &nic_mask;
8366         size = sizeof(ipv6_m->hdr.dst_addr);
8367         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8368                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8369         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8370                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
8371         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
8372         for (i = 0; i < size; ++i)
8373                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
8374         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
8375                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8376         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
8377                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
8378         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
8379         for (i = 0; i < size; ++i)
8380                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
8381         /* TOS. */
8382         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
8383         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
8384         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
8385         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
8386         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
8387         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
8388         /* Label. */
8389         if (inner) {
8390                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
8391                          vtc_m);
8392                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
8393                          vtc_v);
8394         } else {
8395                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
8396                          vtc_m);
8397                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
8398                          vtc_v);
8399         }
8400         /* Protocol. */
8401         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8402                  ipv6_m->hdr.proto);
8403         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8404                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
8405         /* Hop limit. */
8406         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
8407                  ipv6_m->hdr.hop_limits);
8408         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
8409                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
8410         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
8411                  !!(ipv6_m->has_frag_ext));
8412         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
8413                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
8414 }
8415
8416 /**
8417  * Add IPV6 fragment extension item to matcher and to the value.
8418  *
8419  * @param[in, out] matcher
8420  *   Flow matcher.
8421  * @param[in, out] key
8422  *   Flow matcher value.
8423  * @param[in] item
8424  *   Flow pattern to translate.
8425  * @param[in] inner
8426  *   Item is inner pattern.
8427  */
8428 static void
8429 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
8430                                      const struct rte_flow_item *item,
8431                                      int inner)
8432 {
8433         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
8434         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
8435         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
8436                 .hdr = {
8437                         .next_header = 0xff,
8438                         .frag_data = RTE_BE16(0xffff),
8439                 },
8440         };
8441         void *headers_m;
8442         void *headers_v;
8443
8444         if (inner) {
8445                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8446                                          inner_headers);
8447                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8448         } else {
8449                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8450                                          outer_headers);
8451                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8452         }
8453         /* IPv6 fragment extension item exists, so packet is IP fragment. */
8454         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
8455         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
8456         if (!ipv6_frag_ext_v)
8457                 return;
8458         if (!ipv6_frag_ext_m)
8459                 ipv6_frag_ext_m = &nic_mask;
8460         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
8461                  ipv6_frag_ext_m->hdr.next_header);
8462         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
8463                  ipv6_frag_ext_v->hdr.next_header &
8464                  ipv6_frag_ext_m->hdr.next_header);
8465 }
8466
8467 /**
8468  * Add TCP item to matcher and to the value.
8469  *
8470  * @param[in, out] matcher
8471  *   Flow matcher.
8472  * @param[in, out] key
8473  *   Flow matcher value.
8474  * @param[in] item
8475  *   Flow pattern to translate.
8476  * @param[in] inner
8477  *   Item is inner pattern.
8478  */
8479 static void
8480 flow_dv_translate_item_tcp(void *matcher, void *key,
8481                            const struct rte_flow_item *item,
8482                            int inner)
8483 {
8484         const struct rte_flow_item_tcp *tcp_m = item->mask;
8485         const struct rte_flow_item_tcp *tcp_v = item->spec;
8486         void *headers_m;
8487         void *headers_v;
8488
8489         if (inner) {
8490                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8491                                          inner_headers);
8492                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8493         } else {
8494                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8495                                          outer_headers);
8496                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8497         }
8498         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8499         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
8500         if (!tcp_v)
8501                 return;
8502         if (!tcp_m)
8503                 tcp_m = &rte_flow_item_tcp_mask;
8504         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
8505                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
8506         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
8507                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
8508         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
8509                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
8510         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
8511                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
8512         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
8513                  tcp_m->hdr.tcp_flags);
8514         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
8515                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
8516 }
8517
8518 /**
8519  * Add UDP item to matcher and to the value.
8520  *
8521  * @param[in, out] matcher
8522  *   Flow matcher.
8523  * @param[in, out] key
8524  *   Flow matcher value.
8525  * @param[in] item
8526  *   Flow pattern to translate.
8527  * @param[in] inner
8528  *   Item is inner pattern.
8529  */
8530 static void
8531 flow_dv_translate_item_udp(void *matcher, void *key,
8532                            const struct rte_flow_item *item,
8533                            int inner)
8534 {
8535         const struct rte_flow_item_udp *udp_m = item->mask;
8536         const struct rte_flow_item_udp *udp_v = item->spec;
8537         void *headers_m;
8538         void *headers_v;
8539
8540         if (inner) {
8541                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8542                                          inner_headers);
8543                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8544         } else {
8545                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8546                                          outer_headers);
8547                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8548         }
8549         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8550         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
8551         if (!udp_v)
8552                 return;
8553         if (!udp_m)
8554                 udp_m = &rte_flow_item_udp_mask;
8555         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
8556                  rte_be_to_cpu_16(udp_m->hdr.src_port));
8557         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
8558                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
8559         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
8560                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
8561         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
8562                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
8563 }
8564
8565 /**
8566  * Add GRE optional Key item to matcher and to the value.
8567  *
8568  * @param[in, out] matcher
8569  *   Flow matcher.
8570  * @param[in, out] key
8571  *   Flow matcher value.
8572  * @param[in] item
8573  *   Flow pattern to translate.
8576  */
8577 static void
8578 flow_dv_translate_item_gre_key(void *matcher, void *key,
8579                                    const struct rte_flow_item *item)
8580 {
8581         const rte_be32_t *key_m = item->mask;
8582         const rte_be32_t *key_v = item->spec;
8583         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8584         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8585         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
8586
8587         /* GRE K bit must be on and should already be validated */
8588         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
8589         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
8590         if (!key_v)
8591                 return;
8592         if (!key_m)
8593                 key_m = &gre_key_default_mask;
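        /*
         * The 32-bit GRE key is split across two match fields: gre_key_h
         * holds the upper 24 bits and gre_key_l the lower 8. For example
         * (illustrative), key 0x12345678 gives gre_key_h = 0x123456 and
         * gre_key_l = 0x78.
         */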
8594         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
8595                  rte_be_to_cpu_32(*key_m) >> 8);
8596         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
8597                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
8598         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
8599                  rte_be_to_cpu_32(*key_m) & 0xFF);
8600         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
8601                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
8602 }
8603
8604 /**
8605  * Add GRE item to matcher and to the value.
8606  *
8607  * @param[in, out] matcher
8608  *   Flow matcher.
8609  * @param[in, out] key
8610  *   Flow matcher value.
8611  * @param[in] item
8612  *   Flow pattern to translate.
8613  * @param[in] inner
8614  *   Item is inner pattern.
8615  */
8616 static void
8617 flow_dv_translate_item_gre(void *matcher, void *key,
8618                            const struct rte_flow_item *item,
8619                            int inner)
8620 {
8621         const struct rte_flow_item_gre *gre_m = item->mask;
8622         const struct rte_flow_item_gre *gre_v = item->spec;
8623         void *headers_m;
8624         void *headers_v;
8625         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8626         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8627         struct {
8628                 union {
8629                         __extension__
8630                         struct {
8631                                 uint16_t version:3;
8632                                 uint16_t rsvd0:9;
8633                                 uint16_t s_present:1;
8634                                 uint16_t k_present:1;
8635                                 uint16_t rsvd_bit1:1;
8636                                 uint16_t c_present:1;
8637                         };
8638                         uint16_t value;
8639                 };
8640         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
8641
8642         if (inner) {
8643                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8644                                          inner_headers);
8645                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8646         } else {
8647                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8648                                          outer_headers);
8649                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8650         }
8651         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
8652         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
8653         if (!gre_v)
8654                 return;
8655         if (!gre_m)
8656                 gre_m = &rte_flow_item_gre_mask;
8657         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
8658                  rte_be_to_cpu_16(gre_m->protocol));
8659         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
8660                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
8661         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
8662         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
8663         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
8664                  gre_crks_rsvd0_ver_m.c_present);
8665         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
8666                  gre_crks_rsvd0_ver_v.c_present &
8667                  gre_crks_rsvd0_ver_m.c_present);
8668         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
8669                  gre_crks_rsvd0_ver_m.k_present);
8670         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
8671                  gre_crks_rsvd0_ver_v.k_present &
8672                  gre_crks_rsvd0_ver_m.k_present);
8673         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
8674                  gre_crks_rsvd0_ver_m.s_present);
8675         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
8676                  gre_crks_rsvd0_ver_v.s_present &
8677                  gre_crks_rsvd0_ver_m.s_present);
8678 }
8679
8680 /**
8681  * Add NVGRE item to matcher and to the value.
8682  *
8683  * @param[in, out] matcher
8684  *   Flow matcher.
8685  * @param[in, out] key
8686  *   Flow matcher value.
8687  * @param[in] item
8688  *   Flow pattern to translate.
8689  * @param[in] inner
8690  *   Item is inner pattern.
8691  */
8692 static void
8693 flow_dv_translate_item_nvgre(void *matcher, void *key,
8694                              const struct rte_flow_item *item,
8695                              int inner)
8696 {
8697         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
8698         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
8699         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8700         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8701         const char *tni_flow_id_m;
8702         const char *tni_flow_id_v;
8703         char *gre_key_m;
8704         char *gre_key_v;
8705         int size;
8706         int i;
8707
8708         /* For NVGRE, GRE header fields must be set with defined values. */
8709         const struct rte_flow_item_gre gre_spec = {
8710                 .c_rsvd0_ver = RTE_BE16(0x2000),
8711                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
8712         };
8713         const struct rte_flow_item_gre gre_mask = {
8714                 .c_rsvd0_ver = RTE_BE16(0xB000),
8715                 .protocol = RTE_BE16(UINT16_MAX),
8716         };
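        /*
         * In c_rsvd0_ver the spec value 0x2000 sets only the K (key
         * present) bit; the mask 0xB000 additionally checks that the C
         * (checksum) and S (sequence) bits are clear, as NVGRE requires.
         */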
8717         const struct rte_flow_item gre_item = {
8718                 .spec = &gre_spec,
8719                 .mask = &gre_mask,
8720                 .last = NULL,
8721         };
8722         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
8723         if (!nvgre_v)
8724                 return;
8725         if (!nvgre_m)
8726                 nvgre_m = &rte_flow_item_nvgre_mask;
8727         tni_flow_id_m = (const char *)nvgre_m->tni;
8728         tni_flow_id_v = (const char *)nvgre_v->tni;
8729         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
8730         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
8731         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
8732         memcpy(gre_key_m, tni_flow_id_m, size);
8733         for (i = 0; i < size; ++i)
8734                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
8735 }
8736
8737 /**
8738  * Add VXLAN item to matcher and to the value.
8739  *
8740  * @param[in] dev
8741  *   Pointer to the Ethernet device structure.
8742  * @param[in] attr
8743  *   Flow rule attributes.
8744  * @param[in, out] matcher
8745  *   Flow matcher.
8746  * @param[in, out] key
8747  *   Flow matcher value.
8748  * @param[in] item
8749  *   Flow pattern to translate.
8750  * @param[in] inner
8751  *   Item is inner pattern.
8752  */
8753 static void
8754 flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
8755                              const struct rte_flow_attr *attr,
8756                              void *matcher, void *key,
8757                              const struct rte_flow_item *item,
8758                              int inner)
8759 {
8760         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
8761         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
8762         void *headers_m;
8763         void *headers_v;
8764         void *misc5_m;
8765         void *misc5_v;
8766         uint32_t *tunnel_header_v;
8767         uint32_t *tunnel_header_m;
8768         uint16_t dport;
8769         struct mlx5_priv *priv = dev->data->dev_private;
8770         const struct rte_flow_item_vxlan nic_mask = {
8771                 .vni = "\xff\xff\xff",
8772                 .rsvd1 = 0xff,
8773         };
8774
8775         if (inner) {
8776                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8777                                          inner_headers);
8778                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8779         } else {
8780                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8781                                          outer_headers);
8782                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8783         }
8784         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8785                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8786         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8787                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8788                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8789         }
8790         if (!vxlan_v)
8791                 return;
8792         if (!vxlan_m) {
8793                 if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
8794                     (attr->group && !priv->sh->misc5_cap))
8795                         vxlan_m = &rte_flow_item_vxlan_mask;
8796                 else
8797                         vxlan_m = &nic_mask;
8798         }
8799         if ((!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
8800             ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
8801                 void *misc_m;
8802                 void *misc_v;
8803                 char *vni_m;
8804                 char *vni_v;
8805                 int size;
8806                 int i;
8807                 misc_m = MLX5_ADDR_OF(fte_match_param,
8808                                       matcher, misc_parameters);
8809                 misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8810                 size = sizeof(vxlan_m->vni);
8811                 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
8812                 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
8813                 memcpy(vni_m, vxlan_m->vni, size);
8814                 for (i = 0; i < size; ++i)
8815                         vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8816                 return;
8817         }
8818         misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
8819         misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
8820         tunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8821                                                    misc5_v,
8822                                                    tunnel_header_1);
8823         tunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,
8824                                                    misc5_m,
8825                                                    tunnel_header_1);
8826         *tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |
8827                            (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |
8828                            (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;
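        /*
         * The three VNI bytes land in bits 0-23 of tunnel_header_1 and
         * rsvd1 in bits 24-31. For example (illustrative), VNI bytes
         * {0x12, 0x34, 0x56} with a full mask give 0x00563412 here,
         * before rsvd1 is OR-ed in below.
         */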
8829         if (*tunnel_header_v)
8830                 *tunnel_header_m = vxlan_m->vni[0] |
8831                         vxlan_m->vni[1] << 8 |
8832                         vxlan_m->vni[2] << 16;
8833         else
8834                 *tunnel_header_m = 0x0;
8835         *tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;
8836         if (vxlan_v->rsvd1 & vxlan_m->rsvd1)
8837                 *tunnel_header_m |= vxlan_m->rsvd1 << 24;
8838 }
8839
8840 /**
8841  * Add VXLAN-GPE item to matcher and to the value.
8842  *
8843  * @param[in, out] matcher
8844  *   Flow matcher.
8845  * @param[in, out] key
8846  *   Flow matcher value.
8847  * @param[in] item
8848  *   Flow pattern to translate.
8849  * @param[in] inner
8850  *   Item is inner pattern.
8851  */
8853 static void
8854 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
8855                                  const struct rte_flow_item *item, int inner)
8856 {
8857         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
8858         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
8859         void *headers_m;
8860         void *headers_v;
8861         void *misc_m =
8862                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
8863         void *misc_v =
8864                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8865         char *vni_m;
8866         char *vni_v;
8867         uint16_t dport;
8868         int size;
8869         int i;
8870         uint8_t flags_m = 0xff;
8871         uint8_t flags_v = 0xc;
8872
8873         if (inner) {
8874                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8875                                          inner_headers);
8876                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8877         } else {
8878                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8879                                          outer_headers);
8880                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8881         }
8882         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
8883                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
8884         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8885                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8886                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8887         }
8888         if (!vxlan_v)
8889                 return;
8890         if (!vxlan_m)
8891                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
8892         size = sizeof(vxlan_m->vni);
8893         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
8894         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
8895         memcpy(vni_m, vxlan_m->vni, size);
8896         for (i = 0; i < size; ++i)
8897                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
8898         if (vxlan_m->flags) {
8899                 flags_m = vxlan_m->flags;
8900                 flags_v = vxlan_v->flags;
8901         }
8902         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
8903         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
8904         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
8905                  vxlan_m->protocol);
8906         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
8907                  vxlan_v->protocol);
8908 }
8909
8910 /**
8911  * Add Geneve item to matcher and to the value.
8912  *
8913  * @param[in, out] matcher
8914  *   Flow matcher.
8915  * @param[in, out] key
8916  *   Flow matcher value.
8917  * @param[in] item
8918  *   Flow pattern to translate.
8919  * @param[in] inner
8920  *   Item is inner pattern.
8921  */
8923 static void
8924 flow_dv_translate_item_geneve(void *matcher, void *key,
8925                               const struct rte_flow_item *item, int inner)
8926 {
8927         const struct rte_flow_item_geneve *geneve_m = item->mask;
8928         const struct rte_flow_item_geneve *geneve_v = item->spec;
8929         void *headers_m;
8930         void *headers_v;
8931         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8932         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8933         uint16_t dport;
8934         uint16_t gbhdr_m;
8935         uint16_t gbhdr_v;
8936         char *vni_m;
8937         char *vni_v;
8938         size_t size, i;
8939
8940         if (inner) {
8941                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8942                                          inner_headers);
8943                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8944         } else {
8945                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8946                                          outer_headers);
8947                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8948         }
8949         dport = MLX5_UDP_PORT_GENEVE;
8950         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8951                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8952                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8953         }
8954         if (!geneve_v)
8955                 return;
8956         if (!geneve_m)
8957                 geneve_m = &rte_flow_item_geneve_mask;
8958         size = sizeof(geneve_m->vni);
8959         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
8960         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
8961         memcpy(vni_m, geneve_m->vni, size);
8962         for (i = 0; i < size; ++i)
8963                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
8964         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
8965                  rte_be_to_cpu_16(geneve_m->protocol));
8966         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
8967                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
8968         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
8969         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
8970         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
8971                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8972         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
8973                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
8974         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
8975                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8976         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
8977                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
8978                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
8979 }
8980
8981 /**
8982  * Create Geneve TLV option resource.
8983  *
8984  * @param[in, out] dev
8985  *   Pointer to rte_eth_dev structure.
8986  * @param[in] item
8987  *   Pointer to the GENEVE TLV option flow item.
8988  * @param[out] error
8989  *   Pointer to error structure.
8990  *
8991  * @return
8992  *   0 on success, a negative errno value otherwise and rte_errno is set.
8995  */
8997 int
8998 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
8999                                              const struct rte_flow_item *item,
9000                                              struct rte_flow_error *error)
9001 {
9002         struct mlx5_priv *priv = dev->data->dev_private;
9003         struct mlx5_dev_ctx_shared *sh = priv->sh;
9004         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
9005                         sh->geneve_tlv_option_resource;
9006         struct mlx5_devx_obj *obj;
9007         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9008         int ret = 0;
9009
9010         if (!geneve_opt_v)
9011                 return -1;
9012         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
9013         if (geneve_opt_resource != NULL) {
9014                 if (geneve_opt_resource->option_class ==
9015                         geneve_opt_v->option_class &&
9016                         geneve_opt_resource->option_type ==
9017                         geneve_opt_v->option_type &&
9018                         geneve_opt_resource->length ==
9019                         geneve_opt_v->option_len) {
9020                         /* We already have GENEVE TLV option obj allocated. */
9021                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
9022                                            __ATOMIC_RELAXED);
9023                 } else {
9024                         ret = rte_flow_error_set(error, ENOMEM,
9025                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9026                                 "Only one GENEVE TLV option supported");
9027                         goto exit;
9028                 }
9029         } else {
9030                 /* Create a GENEVE TLV object and resource. */
9031                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
9032                                 geneve_opt_v->option_class,
9033                                 geneve_opt_v->option_type,
9034                                 geneve_opt_v->option_len);
9035                 if (!obj) {
9036                         ret = rte_flow_error_set(error, ENODATA,
9037                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9038                                 "Failed to create GENEVE TLV Devx object");
9039                         goto exit;
9040                 }
9041                 sh->geneve_tlv_option_resource =
9042                                 mlx5_malloc(MLX5_MEM_ZERO,
9043                                                 sizeof(*geneve_opt_resource),
9044                                                 0, SOCKET_ID_ANY);
9045                 if (!sh->geneve_tlv_option_resource) {
9046                         claim_zero(mlx5_devx_cmd_destroy(obj));
9047                         ret = rte_flow_error_set(error, ENOMEM,
9048                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9049                                 "GENEVE TLV object memory allocation failed");
9050                         goto exit;
9051                 }
9052                 geneve_opt_resource = sh->geneve_tlv_option_resource;
9053                 geneve_opt_resource->obj = obj;
9054                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
9055                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
9056                 geneve_opt_resource->length = geneve_opt_v->option_len;
9057                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
9058                                 __ATOMIC_RELAXED);
9059         }
9060 exit:
9061         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
9062         return ret;
9063 }
9064
9065 /**
9066  * Add Geneve TLV option item to matcher.
9067  *
9068  * @param[in, out] dev
9069  *   Pointer to rte_eth_dev structure.
9070  * @param[in, out] matcher
9071  *   Flow matcher.
9072  * @param[in, out] key
9073  *   Flow matcher value.
9074  * @param[in] item
9075  *   Flow pattern to translate.
9076  * @param[out] error
9077  *   Pointer to error structure.
9078  *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
9079 static int
9080 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
9081                                   void *key, const struct rte_flow_item *item,
9082                                   struct rte_flow_error *error)
9083 {
9084         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
9085         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
9086         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9087         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9088         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9089                         misc_parameters_3);
9090         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9091         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
9092         int ret = 0;
9093
9094         if (!geneve_opt_v)
9095                 return -1;
9096         if (!geneve_opt_m)
9097                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
9098         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
9099                                                            error);
9100         if (ret) {
9101                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
9102                 return ret;
9103         }
9104         /*
9105          * The GENEVE TLV option length is expressed by the option
9106          * length field in the GENEVE header.
9107          * If a match on the option length was not requested but a
9108          * GENEVE TLV option item is present, set it implicitly.
9110          */
9111         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
9112                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
9113                          MLX5_GENEVE_OPTLEN_MASK);
9114                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
9115                          geneve_opt_v->option_len + 1);
9116         }
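        /*
         * Example (illustrative): an option with option_len = 1 (one
         * 4-byte data word) is matched with geneve_opt_len = 2, since the
         * GENEVE header option-length field also covers the 4-byte option
         * header, hence the option_len + 1 above.
         */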
9117         /* Set the data. */
9118         if (geneve_opt_v->data) {
9119                 memcpy(&opt_data_key, geneve_opt_v->data,
9120                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9121                                 sizeof(opt_data_key)));
9122                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9123                                 sizeof(opt_data_key));
9124                 memcpy(&opt_data_mask, geneve_opt_m->data,
9125                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
9126                                 sizeof(opt_data_mask)));
9127                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
9128                                 sizeof(opt_data_mask));
9129                 MLX5_SET(fte_match_set_misc3, misc3_m,
9130                                 geneve_tlv_option_0_data,
9131                                 rte_be_to_cpu_32(opt_data_mask));
9132                 MLX5_SET(fte_match_set_misc3, misc3_v,
9133                                 geneve_tlv_option_0_data,
9134                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
9135         }
9136         return ret;
9137 }
9138
9139 /**
9140  * Add MPLS item to matcher and to the value.
9141  *
9142  * @param[in, out] matcher
9143  *   Flow matcher.
9144  * @param[in, out] key
9145  *   Flow matcher value.
9146  * @param[in] item
9147  *   Flow pattern to translate.
9148  * @param[in] prev_layer
9149  *   The protocol layer indicated in previous item.
9150  * @param[in] inner
9151  *   Item is inner pattern.
9152  */
9153 static void
9154 flow_dv_translate_item_mpls(void *matcher, void *key,
9155                             const struct rte_flow_item *item,
9156                             uint64_t prev_layer,
9157                             int inner)
9158 {
9159         const uint32_t *in_mpls_m = item->mask;
9160         const uint32_t *in_mpls_v = item->spec;
9161         uint32_t *out_mpls_m = NULL;
9162         uint32_t *out_mpls_v = NULL;
9163         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9164         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9165         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
9166                                      misc_parameters_2);
9167         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9168         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
9169         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9170
9171         switch (prev_layer) {
9172         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9173                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
9174                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
9175                          MLX5_UDP_PORT_MPLS);
9176                 break;
9177         case MLX5_FLOW_LAYER_GRE:
9178                 /* Fall-through. */
9179         case MLX5_FLOW_LAYER_GRE_KEY:
9180                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
9181                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
9182                          RTE_ETHER_TYPE_MPLS);
9183                 break;
9184         default:
9185                 break;
9186         }
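        /*
         * The cases above pin the encapsulation: MPLS-over-UDP is
         * identified by destination port 6635 (MLX5_UDP_PORT_MPLS) and
         * MPLS-over-GRE by GRE protocol 0x8847 (RTE_ETHER_TYPE_MPLS).
         */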
9187         if (!in_mpls_v)
9188                 return;
9189         if (!in_mpls_m)
9190                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
9191         switch (prev_layer) {
9192         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
9193                 out_mpls_m =
9194                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9195                                                  outer_first_mpls_over_udp);
9196                 out_mpls_v =
9197                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9198                                                  outer_first_mpls_over_udp);
9199                 break;
9200         case MLX5_FLOW_LAYER_GRE:
9201                 out_mpls_m =
9202                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
9203                                                  outer_first_mpls_over_gre);
9204                 out_mpls_v =
9205                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
9206                                                  outer_first_mpls_over_gre);
9207                 break;
9208         default:
9209                 /* Inner MPLS not over GRE is not supported. */
9210                 if (!inner) {
9211                         out_mpls_m =
9212                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9213                                                          misc2_m,
9214                                                          outer_first_mpls);
9215                         out_mpls_v =
9216                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
9217                                                          misc2_v,
9218                                                          outer_first_mpls);
9219                 }
9220                 break;
9221         }
9222         if (out_mpls_m && out_mpls_v) {
9223                 *out_mpls_m = *in_mpls_m;
9224                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
9225         }
9226 }
9227
9228 /**
9229  * Add metadata register item to matcher
9230  *
9231  * @param[in, out] matcher
9232  *   Flow matcher.
9233  * @param[in, out] key
9234  *   Flow matcher value.
9235  * @param[in] reg_type
9236  *   Type of device metadata register.
9237  * @param[in] data
9238  *   Register data to match.
9239  * @param[in] mask
9240  *   Register mask.
9241  */
9242 static void
9243 flow_dv_match_meta_reg(void *matcher, void *key,
9244                        enum modify_reg reg_type,
9245                        uint32_t data, uint32_t mask)
9246 {
9247         void *misc2_m =
9248                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
9249         void *misc2_v =
9250                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
9251         uint32_t temp;
9252
9253         data &= mask;
9254         switch (reg_type) {
9255         case REG_A:
9256                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
9257                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
9258                 break;
9259         case REG_B:
9260                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
9261                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
9262                 break;
9263         case REG_C_0:
9264                 /*
9265                  * The metadata register C0 field might be divided into
9266                  * source vport index and META item value, we should set
9267                  * this field according to specified mask, not as whole one.
9268                  */
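                /*
                 * Illustrative example: if the low 16 bits of REG_C_0
                 * carry the source vport and the high 16 bits the META
                 * value, updating META must OR the new mask/data into the
                 * existing matcher contents rather than overwrite the
                 * vport bits, hence the read-modify-write below.
                 */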
9269                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
9270                 temp |= mask;
9271                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
9272                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
9273                 temp &= ~mask;
9274                 temp |= data;
9275                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
9276                 break;
9277         case REG_C_1:
9278                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
9279                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
9280                 break;
9281         case REG_C_2:
9282                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
9283                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
9284                 break;
9285         case REG_C_3:
9286                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
9287                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
9288                 break;
9289         case REG_C_4:
9290                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
9291                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
9292                 break;
9293         case REG_C_5:
9294                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
9295                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
9296                 break;
9297         case REG_C_6:
9298                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
9299                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
9300                 break;
9301         case REG_C_7:
9302                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
9303                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
9304                 break;
9305         default:
9306                 MLX5_ASSERT(false);
9307                 break;
9308         }
9309 }
9310
9311 /**
9312  * Add MARK item to matcher
9313  *
9314  * @param[in] dev
9315  *   The device to configure through.
9316  * @param[in, out] matcher
9317  *   Flow matcher.
9318  * @param[in, out] key
9319  *   Flow matcher value.
9320  * @param[in] item
9321  *   Flow pattern to translate.
9322  */
9323 static void
9324 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
9325                             void *matcher, void *key,
9326                             const struct rte_flow_item *item)
9327 {
9328         struct mlx5_priv *priv = dev->data->dev_private;
9329         const struct rte_flow_item_mark *mark;
9330         uint32_t value;
9331         uint32_t mask;
9332
9333         mark = item->mask ? (const void *)item->mask :
9334                             &rte_flow_item_mark_mask;
9335         mask = mark->id & priv->sh->dv_mark_mask;
9336         mark = (const void *)item->spec;
9337         MLX5_ASSERT(mark);
9338         value = mark->id & priv->sh->dv_mark_mask & mask;
9339         if (mask) {
9340                 enum modify_reg reg;
9341
9342                 /* Get the metadata register index for the mark. */
9343                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
9344                 MLX5_ASSERT(reg > 0);
9345                 if (reg == REG_C_0) {
9346                         struct mlx5_priv *priv = dev->data->dev_private;
9347                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9348                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9349
9350                         mask &= msk_c0;
9351                         mask <<= shl_c0;
9352                         value <<= shl_c0;
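                        /*
                         * E.g. (illustrative) msk_c0 = 0xffff0000 gives
                         * shl_c0 = 16, so a MARK id of 0x5 is matched as
                         * 0x00050000 within REG_C_0.
                         */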
9353                 }
9354                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9355         }
9356 }
9357
9358 /**
9359  * Add META item to matcher
9360  *
9361  * @param[in] dev
9362  *   The device to configure through.
9363  * @param[in, out] matcher
9364  *   Flow matcher.
9365  * @param[in, out] key
9366  *   Flow matcher value.
9367  * @param[in] attr
9368  *   Attributes of flow that includes this item.
9369  * @param[in] item
9370  *   Flow pattern to translate.
9371  */
9372 static void
9373 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
9374                             void *matcher, void *key,
9375                             const struct rte_flow_attr *attr,
9376                             const struct rte_flow_item *item)
9377 {
9378         const struct rte_flow_item_meta *meta_m;
9379         const struct rte_flow_item_meta *meta_v;
9380
9381         meta_m = (const void *)item->mask;
9382         if (!meta_m)
9383                 meta_m = &rte_flow_item_meta_mask;
9384         meta_v = (const void *)item->spec;
9385         if (meta_v) {
9386                 int reg;
9387                 uint32_t value = meta_v->data;
9388                 uint32_t mask = meta_m->data;
9389
9390                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
9391                 if (reg < 0)
9392                         return;
9393                 MLX5_ASSERT(reg != REG_NON);
9394                 if (reg == REG_C_0) {
9395                         struct mlx5_priv *priv = dev->data->dev_private;
9396                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9397                         uint32_t shl_c0 = rte_bsf32(msk_c0);
9398
9399                         mask &= msk_c0;
9400                         mask <<= shl_c0;
9401                         value <<= shl_c0;
9402                 }
9403                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
9404         }
9405 }
9406
9407 /**
9408  * Add vport metadata Reg C0 item to matcher
9409  *
9410  * @param[in, out] matcher
9411  *   Flow matcher.
9412  * @param[in, out] key
9413  *   Flow matcher value.
9414  * @param[in] value
9415  *   Register value to match.
 * @param[in] mask
 *   Register mask.
9416  */
9417 static void
9418 flow_dv_translate_item_meta_vport(void *matcher, void *key,
9419                                   uint32_t value, uint32_t mask)
9420 {
9421         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
9422 }
9423
9424 /**
9425  * Add tag item to matcher.
9426  *
9427  * @param[in] dev
9428  *   The device to configure through.
9429  * @param[in, out] matcher
9430  *   Flow matcher.
9431  * @param[in, out] key
9432  *   Flow matcher value.
9433  * @param[in] item
9434  *   Flow pattern to translate.
9435  */
9436 static void
9437 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
9438                                 void *matcher, void *key,
9439                                 const struct rte_flow_item *item)
9440 {
9441         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
9442         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
9443         uint32_t mask, value;
9444
9445         MLX5_ASSERT(tag_v);
9446         value = tag_v->data;
9447         mask = tag_m ? tag_m->data : UINT32_MAX;
9448         if (tag_v->id == REG_C_0) {
9449                 struct mlx5_priv *priv = dev->data->dev_private;
9450                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
9451                 uint32_t shl_c0 = rte_bsf32(msk_c0);
9452
9453                 mask &= msk_c0;
9454                 mask <<= shl_c0;
9455                 value <<= shl_c0;
9456         }
9457         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
9458 }
9459
9460 /**
9461  * Add TAG item to matcher.
9462  *
9463  * @param[in] dev
9464  *   The device to configure through.
9465  * @param[in, out] matcher
9466  *   Flow matcher.
9467  * @param[in, out] key
9468  *   Flow matcher value.
9469  * @param[in] item
9470  *   Flow pattern to translate.
9471  */
9472 static void
9473 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
9474                            void *matcher, void *key,
9475                            const struct rte_flow_item *item)
9476 {
9477         const struct rte_flow_item_tag *tag_v = item->spec;
9478         const struct rte_flow_item_tag *tag_m = item->mask;
9479         enum modify_reg reg;
9480
9481         MLX5_ASSERT(tag_v);
9482         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
9483         /* Get the metadata register index for the tag. */
9484         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
9485         MLX5_ASSERT(reg > 0);
9486         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
9487 }
9488
9489 /**
9490  * Add source vport match to the specified matcher.
9491  *
9492  * @param[in, out] matcher
9493  *   Flow matcher.
9494  * @param[in, out] key
9495  *   Flow matcher value.
9496  * @param[in] port
9497  *   Source vport value to match.
9498  * @param[in] mask
9499  *   Mask to apply.
9500  */
9501 static void
9502 flow_dv_translate_item_source_vport(void *matcher, void *key,
9503                                     int16_t port, uint16_t mask)
9504 {
9505         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
9506         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
9507
9508         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
9509         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
9510 }
9511
9512 /**
9513  * Translate port-id item to eswitch match on port-id.
9514  *
9515  * @param[in] dev
9516  *   The device to configure through.
9517  * @param[in, out] matcher
9518  *   Flow matcher.
9519  * @param[in, out] key
9520  *   Flow matcher value.
9521  * @param[in] item
9522  *   Flow pattern to translate.
9523  * @param[in] attr
9524  *   Flow attributes.
9525  *
9526  * @return
9527  *   0 on success, a negative errno value otherwise.
9528  */
9529 static int
9530 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
9531                                void *key, const struct rte_flow_item *item,
9532                                const struct rte_flow_attr *attr)
9533 {
9534         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
9535         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
9536         struct mlx5_priv *priv;
9537         uint16_t mask, id;
9538
9539         mask = pid_m ? pid_m->id : 0xffff;
9540         id = pid_v ? pid_v->id : dev->data->port_id;
9541         priv = mlx5_port_to_eswitch_info(id, item == NULL);
9542         if (!priv)
9543                 return -rte_errno;
9544         /*
9545          * Translate to vport field or to metadata, depending on mode.
9546          * Kernel can use either misc.source_port or half of C0 metadata
9547          * register.
9548          */
9549         if (priv->vport_meta_mask) {
9550                 /*
9551                  * Provide the hint for SW steering library
9552                  * to insert the flow into ingress domain and
9553                  * save the extra vport match.
9554                  */
9555                 if (mask == 0xffff && priv->vport_id == 0xffff &&
9556                     priv->pf_bond < 0 && attr->transfer)
9557                         flow_dv_translate_item_source_vport
9558                                 (matcher, key, priv->vport_id, mask);
9559                 /*
9560                  * We should always set the vport metadata register;
9561                  * otherwise the SW steering library can drop the rule
9562                  * if the wire vport metadata value is not zero,
9563                  * depending on the kernel configuration.
9564                  */
9565                 flow_dv_translate_item_meta_vport(matcher, key,
9566                                                   priv->vport_meta_tag,
9567                                                   priv->vport_meta_mask);
9568         } else {
9569                 flow_dv_translate_item_source_vport(matcher, key,
9570                                                     priv->vport_id, mask);
9571         }
9572         return 0;
9573 }
9574
9575 /**
9576  * Add ICMP6 item to matcher and to the value.
9577  *
9578  * @param[in, out] matcher
9579  *   Flow matcher.
9580  * @param[in, out] key
9581  *   Flow matcher value.
9582  * @param[in] item
9583  *   Flow pattern to translate.
9584  * @param[in] inner
9585  *   Item is inner pattern.
9586  */
9587 static void
9588 flow_dv_translate_item_icmp6(void *matcher, void *key,
9589                               const struct rte_flow_item *item,
9590                               int inner)
9591 {
9592         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
9593         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
9594         void *headers_m;
9595         void *headers_v;
9596         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9597                                      misc_parameters_3);
9598         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9599         if (inner) {
9600                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9601                                          inner_headers);
9602                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9603         } else {
9604                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9605                                          outer_headers);
9606                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9607         }
9608         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9609         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
9610         if (!icmp6_v)
9611                 return;
9612         if (!icmp6_m)
9613                 icmp6_m = &rte_flow_item_icmp6_mask;
9614         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
9615         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
9616                  icmp6_v->type & icmp6_m->type);
9617         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
9618         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
9619                  icmp6_v->code & icmp6_m->code);
9620 }
9621
9622 /**
9623  * Add ICMP item to matcher and to the value.
9624  *
9625  * @param[in, out] matcher
9626  *   Flow matcher.
9627  * @param[in, out] key
9628  *   Flow matcher value.
9629  * @param[in] item
9630  *   Flow pattern to translate.
9631  * @param[in] inner
9632  *   Item is inner pattern.
9633  */
9634 static void
9635 flow_dv_translate_item_icmp(void *matcher, void *key,
9636                             const struct rte_flow_item *item,
9637                             int inner)
9638 {
9639         const struct rte_flow_item_icmp *icmp_m = item->mask;
9640         const struct rte_flow_item_icmp *icmp_v = item->spec;
9641         uint32_t icmp_header_data_m = 0;
9642         uint32_t icmp_header_data_v = 0;
9643         void *headers_m;
9644         void *headers_v;
9645         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9646                                      misc_parameters_3);
9647         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9648         if (inner) {
9649                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9650                                          inner_headers);
9651                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9652         } else {
9653                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9654                                          outer_headers);
9655                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9656         }
9657         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
9658         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
9659         if (!icmp_v)
9660                 return;
9661         if (!icmp_m)
9662                 icmp_m = &rte_flow_item_icmp_mask;
9663         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
9664                  icmp_m->hdr.icmp_type);
9665         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
9666                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
9667         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
9668                  icmp_m->hdr.icmp_code);
9669         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
9670                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
9671         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
9672         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
9673         if (icmp_header_data_m) {
9674                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
9675                 icmp_header_data_v |=
9676                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
9677                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
9678                          icmp_header_data_m);
9679                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
9680                          icmp_header_data_v & icmp_header_data_m);
9681         }
9682 }
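
/*
 * A self-contained sketch of the ICMP header-data folding above, with
 * hypothetical identifier/sequence inputs: the two big-endian 16-bit
 * header fields are converted to host order and packed into one dword,
 * identifier in the high half and sequence number in the low half.
 */
static inline uint32_t
flow_dv_icmp_header_data_sketch(rte_be16_t icmp_ident, rte_be16_t icmp_seq_nb)
{
	return ((uint32_t)rte_be_to_cpu_16(icmp_ident) << 16) |
	       rte_be_to_cpu_16(icmp_seq_nb);
}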
9683
9684 /**
9685  * Add GTP item to matcher and to the value.
9686  *
9687  * @param[in, out] matcher
9688  *   Flow matcher.
9689  * @param[in, out] key
9690  *   Flow matcher value.
9691  * @param[in] item
9692  *   Flow pattern to translate.
9693  * @param[in] inner
9694  *   Item is inner pattern.
9695  */
9696 static void
9697 flow_dv_translate_item_gtp(void *matcher, void *key,
9698                            const struct rte_flow_item *item, int inner)
9699 {
9700         const struct rte_flow_item_gtp *gtp_m = item->mask;
9701         const struct rte_flow_item_gtp *gtp_v = item->spec;
9702         void *headers_m;
9703         void *headers_v;
9704         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9705                                      misc_parameters_3);
9706         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9707         uint16_t dport = RTE_GTPU_UDP_PORT;
9708
9709         if (inner) {
9710                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9711                                          inner_headers);
9712                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
9713         } else {
9714                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
9715                                          outer_headers);
9716                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
9717         }
9718         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
9719                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
9720                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
9721         }
9722         if (!gtp_v)
9723                 return;
9724         if (!gtp_m)
9725                 gtp_m = &rte_flow_item_gtp_mask;
9726         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
9727                  gtp_m->v_pt_rsv_flags);
9728         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
9729                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
9730         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
9731         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
9732                  gtp_v->msg_type & gtp_m->msg_type);
9733         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
9734                  rte_be_to_cpu_32(gtp_m->teid));
9735         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
9736                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
9737 }
9738
9739 /**
9740  * Add GTP PSC item to matcher.
9741  *
9742  * @param[in, out] matcher
9743  *   Flow matcher.
9744  * @param[in, out] key
9745  *   Flow matcher value.
9746  * @param[in] item
9747  *   Flow pattern to translate.
9748  *
9749  * @return
9750  *   0 on success.
9751  */
9749 static int
9750 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
9751                                const struct rte_flow_item *item)
9752 {
9753         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
9754         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
9755         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
9756                         misc_parameters_3);
9757         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
9758         union {
9759                 uint32_t w32;
9760                 struct {
9761                         uint16_t seq_num;
9762                         uint8_t npdu_num;
9763                         uint8_t next_ext_header_type;
9764                 };
9765         } dw_2;
9766         uint8_t gtp_flags;
9767
9768         /* Always match on the E-flag being set, regardless of GTP item settings. */
9769         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
9770         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9771         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
9772         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
9773         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
9774         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
9775         /* Set next extension header type. */
9776         dw_2.seq_num = 0;
9777         dw_2.npdu_num = 0;
9778         dw_2.next_ext_header_type = 0xff;
9779         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
9780                  rte_cpu_to_be_32(dw_2.w32));
9781         dw_2.seq_num = 0;
9782         dw_2.npdu_num = 0;
9783         dw_2.next_ext_header_type = 0x85;
9784         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
9785                  rte_cpu_to_be_32(dw_2.w32));
9786         if (gtp_psc_v) {
9787                 union {
9788                         uint32_t w32;
9789                         struct {
9790                                 uint8_t len;
9791                                 uint8_t type_flags;
9792                                 uint8_t qfi;
9793                                 uint8_t reserved;
9794                         };
9795                 } dw_0;
9796
9797                 /* Set extension header PDU type and QoS. */
9798                 if (!gtp_psc_m)
9799                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
9800                 dw_0.w32 = 0;
9801                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
9802                 dw_0.qfi = gtp_psc_m->qfi;
9803                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
9804                          rte_cpu_to_be_32(dw_0.w32));
9805                 dw_0.w32 = 0;
9806                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
9807                                                         gtp_psc_m->pdu_type);
9808                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
9809                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
9810                          rte_cpu_to_be_32(dw_0.w32));
9811         }
9812         return 0;
9813 }
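
/*
 * A minimal sketch of the dw_2 folding above, assuming the same
 * little-endian host the surrounding code targets: seq_num occupies
 * bytes 0-1, npdu_num byte 2 and next_ext_header_type byte 3, so
 * rte_cpu_to_be_32() lands the extension type (0x85, PDU session
 * container) in the last byte of the big-endian dword.
 */
static inline uint32_t
flow_dv_gtp_psc_dw2_sketch(void)
{
	union {
		uint32_t w32;
		struct {
			uint16_t seq_num;
			uint8_t npdu_num;
			uint8_t next_ext_header_type;
		};
	} dw_2;

	dw_2.w32 = 0;
	dw_2.next_ext_header_type = 0x85; /* PDU session container */
	return rte_cpu_to_be_32(dw_2.w32);
}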
9814
9815 /**
9816  * Add eCPRI item to matcher and to the value.
9817  *
9818  * @param[in] dev
9819  *   The device to configure through.
9820  * @param[in, out] matcher
9821  *   Flow matcher.
9822  * @param[in, out] key
9823  *   Flow matcher value.
9824  * @param[in] item
9825  *   Flow pattern to translate.
9828  */
9829 static void
9830 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
9831                              void *key, const struct rte_flow_item *item)
9832 {
9833         struct mlx5_priv *priv = dev->data->dev_private;
9834         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
9835         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
9836         struct rte_ecpri_common_hdr common;
9837         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
9838                                      misc_parameters_4);
9839         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
9840         uint32_t *samples;
9841         void *dw_m;
9842         void *dw_v;
9843
9844         if (!ecpri_v)
9845                 return;
9846         if (!ecpri_m)
9847                 ecpri_m = &rte_flow_item_ecpri_mask;
9848         /*
9849          * At most four DW samples are supported in a single matching now.
9850          * Two are used for eCPRI matching:
9851          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
9852          * 2. ID of a message: one or two bytes, mask 0xffff0000 or
9853          *    0xff000000 if any.
9854          */
9855         if (!ecpri_m->hdr.common.u32)
9856                 return;
9857         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
9858         /* Need to take the whole DW as the mask to fill the entry. */
9859         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9860                             prog_sample_field_value_0);
9861         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9862                             prog_sample_field_value_0);
9863         /* Already big endian (network order) in the header. */
9864         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
9865         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
9866         /* Sample#0, used for matching type, offset 0. */
9867         MLX5_SET(fte_match_set_misc4, misc4_m,
9868                  prog_sample_field_id_0, samples[0]);
9869         /* It makes no sense to set the sample ID in the mask field. */
9870         MLX5_SET(fte_match_set_misc4, misc4_v,
9871                  prog_sample_field_id_0, samples[0]);
9872         /*
9873          * Check whether the message body part needs to be matched.
9874          * Wildcard rules matching only the type field must stay supported.
9875          */
9876         if (ecpri_m->hdr.dummy[0]) {
9877                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
9878                 switch (common.type) {
9879                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
9880                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
9881                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
9882                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
9883                                             prog_sample_field_value_1);
9884                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
9885                                             prog_sample_field_value_1);
9886                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
9887                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
9888                                             ecpri_m->hdr.dummy[0];
9889                         /* Sample#1, to match message body, offset 4. */
9890                         MLX5_SET(fte_match_set_misc4, misc4_m,
9891                                  prog_sample_field_id_1, samples[1]);
9892                         MLX5_SET(fte_match_set_misc4, misc4_v,
9893                                  prog_sample_field_id_1, samples[1]);
9894                         break;
9895                 default:
9896                         /* Others, do not match any sample ID. */
9897                         break;
9898                 }
9899         }
9900 }
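
/*
 * A worked example of the sample layout above, with hypothetical rule
 * contents: the common header type byte is matched through flex-parser
 * sample #0 under the network-order mask 0x00ff0000, and for an
 * IQ-data rule the two-byte message ID from hdr.dummy[0] goes through
 * sample #1 under mask 0xffff0000; wildcard rules that match the type
 * only simply skip the second sample.
 */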
9901
9902 /**
9903  * Add connection tracking status item to matcher.
9904  *
9905  * @param[in] dev
9906  *   The device to configure through.
9907  * @param[in, out] matcher
9908  *   Flow matcher.
9909  * @param[in, out] key
9910  *   Flow matcher value.
9911  * @param[in] item
9912  *   Flow pattern to translate.
9913  */
9914 static void
9915 flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
9916                               void *matcher, void *key,
9917                               const struct rte_flow_item *item)
9918 {
9919         uint32_t reg_value = 0;
9920         int reg_id;
9921         /* The 8 LSBs use the layout 0b11/0000/11; the middle 4 bits are reserved. */
9922         uint32_t reg_mask = 0;
9923         const struct rte_flow_item_conntrack *spec = item->spec;
9924         const struct rte_flow_item_conntrack *mask = item->mask;
9925         uint32_t flags;
9926         struct rte_flow_error error;
9927
9928         if (!mask)
9929                 mask = &rte_flow_item_conntrack_mask;
9930         if (!spec || !mask->flags)
9931                 return;
9932         flags = spec->flags & mask->flags;
9933         /* The conflict should be checked in the validation. */
9934         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
9935                 reg_value |= MLX5_CT_SYNDROME_VALID;
9936         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9937                 reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
9938         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
9939                 reg_value |= MLX5_CT_SYNDROME_INVALID;
9940         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
9941                 reg_value |= MLX5_CT_SYNDROME_TRAP;
9942         if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9943                 reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
9944         if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
9945                            RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
9946                            RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
9947                 reg_mask |= 0xc0;
9948         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
9949                 reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
9950         if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
9951                 reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
9952         /* The REG_C_x value could be saved during startup. */
9953         reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
9954         if (reg_id == REG_NON)
9955                 return;
9956         flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
9957                                reg_value, reg_mask);
9958 }
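
/*
 * A worked example of the syndrome mask composition above, assuming a
 * rule that only cares about the VALID and BAD states: the VALID flag
 * pulls in the full 0xc0 pair (the two high bits of the syndrome
 * byte), BAD adds MLX5_CT_SYNDROME_BAD_PACKET, and every other bit of
 * the CT register stays unmatched so unrelated state changes do not
 * affect the rule.
 */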
9959
9960 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
9961
9962 #define HEADER_IS_ZERO(match_criteria, headers)                              \
9963         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
9964                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
9965
9966 /**
9967  * Calculate flow matcher enable bitmap.
9968  *
9969  * @param match_criteria
9970  *   Pointer to flow matcher criteria.
9971  *
9972  * @return
9973  *   Bitmap of enabled fields.
9974  */
9975 static uint8_t
9976 flow_dv_matcher_enable(uint32_t *match_criteria)
9977 {
9978         uint8_t match_criteria_enable;
9979
9980         match_criteria_enable =
9981                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
9982                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
9983         match_criteria_enable |=
9984                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
9985                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
9986         match_criteria_enable |=
9987                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
9988                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
9989         match_criteria_enable |=
9990                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
9991                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9992         match_criteria_enable |=
9993                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
9994                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
9995         match_criteria_enable |=
9996                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
9997                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
9998         match_criteria_enable |=
9999                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_5)) <<
10000                 MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT;
10001         return match_criteria_enable;
10002 }
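
/*
 * A minimal usage sketch of flow_dv_matcher_enable(): a mask buffer
 * that only populates the outer headers yields a criteria bitmap with
 * just the outer bit set, so the device skips the untouched criteria
 * blocks. The buffer contents here are hypothetical.
 */
static inline uint8_t
flow_dv_matcher_enable_sketch(void)
{
	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
	void *headers = MLX5_ADDR_OF(fte_match_param, buf, outer_headers);

	MLX5_SET(fte_match_set_lyr_2_4, headers, ip_protocol, 0xFF);
	/* Only MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT is set here. */
	return flow_dv_matcher_enable(buf);
}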
10003
10004 static void
10005 __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
10006 {
10007         /*
10008          * Check the flow matching criteria first and subtract the misc5/4
10009          * length if the flow doesn't own misc5/4 parameters. In some old
10010          * rdma-core releases misc5/4 are not supported, and matcher creation
10011          * is expected to fail without the subtraction. If misc5 is provided,
10012          * misc4 must be counted in as well, since misc5 sits right after
10013          * misc4 in the layout.
10014          */
10014         if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
10015                 *size = MLX5_ST_SZ_BYTES(fte_match_param) -
10016                         MLX5_ST_SZ_BYTES(fte_match_set_misc5);
10017                 if (!(match_criteria & (1 <<
10018                         MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT))) {
10019                         *size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
10020                 }
10021         }
10022 }
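
/*
 * A worked example of the adjustment above, assuming a matcher with no
 * misc4/misc5 criteria: the size starts at the full fte_match_param
 * length and shrinks by both trailing blocks, which keeps matcher
 * creation working on old rdma-core releases that predate them.
 */
static inline size_t
flow_dv_adjust_buf_size_sketch(void)
{
	size_t size = MLX5_ST_SZ_BYTES(fte_match_param);

	/* No criteria bits set: both misc5 and misc4 are subtracted. */
	__flow_dv_adjust_buf_size(&size, 0);
	return size;
}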
10023
10024 static struct mlx5_list_entry *
10025 flow_dv_matcher_clone_cb(struct mlx5_list *list __rte_unused,
10026                          struct mlx5_list_entry *entry, void *cb_ctx)
10027 {
10028         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10029         struct mlx5_flow_dv_matcher *ref = ctx->data;
10030         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10031                                                             typeof(*tbl), tbl);
10032         struct mlx5_flow_dv_matcher *resource = mlx5_malloc(MLX5_MEM_ANY,
10033                                                             sizeof(*resource),
10034                                                             0, SOCKET_ID_ANY);
10035
10036         if (!resource) {
10037                 rte_flow_error_set(ctx->error, ENOMEM,
10038                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10039                                    "cannot create matcher");
10040                 return NULL;
10041         }
10042         memcpy(resource, entry, sizeof(*resource));
10043         resource->tbl = &tbl->tbl;
10044         return &resource->entry;
10045 }
10046
10047 static void
10048 flow_dv_matcher_clone_free_cb(struct mlx5_list *list __rte_unused,
10049                              struct mlx5_list_entry *entry)
10050 {
10051         mlx5_free(entry);
10052 }
10053
10054 struct mlx5_hlist_entry *
10055 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
10056 {
10057         struct mlx5_dev_ctx_shared *sh = list->ctx;
10058         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10059         struct rte_eth_dev *dev = ctx->dev;
10060         struct mlx5_flow_tbl_data_entry *tbl_data;
10061         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
10062         struct rte_flow_error *error = ctx->error;
10063         union mlx5_flow_tbl_key key = { .v64 = key64 };
10064         struct mlx5_flow_tbl_resource *tbl;
10065         void *domain;
10066         uint32_t idx = 0;
10067         int ret;
10068
10069         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
10070         if (!tbl_data) {
10071                 rte_flow_error_set(error, ENOMEM,
10072                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10073                                    NULL,
10074                                    "cannot allocate flow table data entry");
10075                 return NULL;
10076         }
10077         tbl_data->idx = idx;
10078         tbl_data->tunnel = tt_prm->tunnel;
10079         tbl_data->group_id = tt_prm->group_id;
10080         tbl_data->external = !!tt_prm->external;
10081         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
10082         tbl_data->is_egress = !!key.is_egress;
10083         tbl_data->is_transfer = !!key.is_fdb;
10084         tbl_data->dummy = !!key.dummy;
10085         tbl_data->level = key.level;
10086         tbl_data->id = key.id;
10087         tbl = &tbl_data->tbl;
10088         if (key.dummy)
10089                 return &tbl_data->entry;
10090         if (key.is_fdb)
10091                 domain = sh->fdb_domain;
10092         else if (key.is_egress)
10093                 domain = sh->tx_domain;
10094         else
10095                 domain = sh->rx_domain;
10096         ret = mlx5_flow_os_create_flow_tbl(domain, key.level, &tbl->obj);
10097         if (ret) {
10098                 rte_flow_error_set(error, ENOMEM,
10099                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10100                                    NULL, "cannot create flow table object");
10101                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10102                 return NULL;
10103         }
10104         if (key.level != 0) {
10105                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
10106                                         (tbl->obj, &tbl_data->jump.action);
10107                 if (ret) {
10108                         rte_flow_error_set(error, ENOMEM,
10109                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10110                                            NULL,
10111                                            "cannot create flow jump action");
10112                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
10113                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
10114                         return NULL;
10115                 }
10116         }
10117         MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
10118               key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
10119               key.level, key.id);
10120         mlx5_list_create(&tbl_data->matchers, matcher_name, sh,
10121                          flow_dv_matcher_create_cb,
10122                          flow_dv_matcher_match_cb,
10123                          flow_dv_matcher_remove_cb,
10124                          flow_dv_matcher_clone_cb,
10125                          flow_dv_matcher_clone_free_cb);
10126         return &tbl_data->entry;
10127 }
10128
10129 int
10130 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
10131                      struct mlx5_hlist_entry *entry, uint64_t key64,
10132                      void *cb_ctx __rte_unused)
10133 {
10134         struct mlx5_flow_tbl_data_entry *tbl_data =
10135                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10136         union mlx5_flow_tbl_key key = { .v64 = key64 };
10137
10138         return tbl_data->level != key.level ||
10139                tbl_data->id != key.id ||
10140                tbl_data->dummy != key.dummy ||
10141                tbl_data->is_transfer != !!key.is_fdb ||
10142                tbl_data->is_egress != !!key.is_egress;
10143 }
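
/*
 * A minimal sketch of the 64-bit key the create/match callbacks above
 * agree on: every lookup-relevant attribute is packed into one
 * mlx5_flow_tbl_key so the hash list can hash and compare a single
 * scalar. The field values below are hypothetical.
 */
static inline uint64_t
flow_dv_tbl_key_sketch(void)
{
	union mlx5_flow_tbl_key key = {
		{
			.level = 1,	/* hypothetical table level */
			.id = 0,
			.reserved = 0,
			.dummy = 0,
			.is_fdb = 1,	/* an E-Switch (FDB) table */
			.is_egress = 0,
		}
	};

	return key.v64;
}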
10144
10145 /**
10146  * Get a flow table.
10147  *
10148  * @param[in, out] dev
10149  *   Pointer to rte_eth_dev structure.
10150  * @param[in] table_level
10151  *   Table level to use.
10152  * @param[in] egress
10153  *   Direction of the table.
10154  * @param[in] transfer
10155  *   E-Switch or NIC flow.
10156  * @param[in] dummy
10157  *   Dummy entry for dv API.
10158  * @param[in] table_id
10159  *   Table id to use.
10160  * @param[out] error
10161  *   pointer to error structure.
10162  *
10163  * @return
10164  *   Returns the table resource based on the index, NULL in case of failure.
10165  */
10166 struct mlx5_flow_tbl_resource *
10167 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
10168                          uint32_t table_level, uint8_t egress,
10169                          uint8_t transfer,
10170                          bool external,
10171                          const struct mlx5_flow_tunnel *tunnel,
10172                          uint32_t group_id, uint8_t dummy,
10173                          uint32_t table_id,
10174                          struct rte_flow_error *error)
10175 {
10176         struct mlx5_priv *priv = dev->data->dev_private;
10177         union mlx5_flow_tbl_key table_key = {
10178                 {
10179                         .level = table_level,
10180                         .id = table_id,
10181                         .reserved = 0,
10182                         .dummy = !!dummy,
10183                         .is_fdb = !!transfer,
10184                         .is_egress = !!egress,
10185                 }
10186         };
10187         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
10188                 .tunnel = tunnel,
10189                 .group_id = group_id,
10190                 .external = external,
10191         };
10192         struct mlx5_flow_cb_ctx ctx = {
10193                 .dev = dev,
10194                 .error = error,
10195                 .data = &tt_prm,
10196         };
10197         struct mlx5_hlist_entry *entry;
10198         struct mlx5_flow_tbl_data_entry *tbl_data;
10199
10200         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
10201         if (!entry) {
10202                 rte_flow_error_set(error, ENOMEM,
10203                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10204                                    "cannot get table");
10205                 return NULL;
10206         }
10207         DRV_LOG(DEBUG, "table_level %u table_id %u "
10208                 "tunnel %u group %u registered.",
10209                 table_level, table_id,
10210                 tunnel ? tunnel->tunnel_id : 0, group_id);
10211         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10212         return &tbl_data->tbl;
10213 }
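
/*
 * A minimal usage sketch of the getter above, with hypothetical
 * arguments: take a reference on a level-1 ingress NIC table and drop
 * it again through flow_dv_tbl_resource_release(), which unregisters
 * the hash-list entry acquired by mlx5_hlist_register().
 */
static inline void
flow_dv_tbl_get_put_sketch(struct rte_eth_dev *dev,
			   struct rte_flow_error *error)
{
	struct mlx5_flow_tbl_resource *tbl;

	tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
				       0, 0, 0, error);
	if (tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
}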
10214
10215 void
10216 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
10217                       struct mlx5_hlist_entry *entry)
10218 {
10219         struct mlx5_dev_ctx_shared *sh = list->ctx;
10220         struct mlx5_flow_tbl_data_entry *tbl_data =
10221                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
10222
10223         MLX5_ASSERT(entry && sh);
10224         if (tbl_data->jump.action)
10225                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
10226         if (tbl_data->tbl.obj)
10227                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
10228         if (tbl_data->tunnel_offload && tbl_data->external) {
10229                 struct mlx5_hlist_entry *he;
10230                 struct mlx5_hlist *tunnel_grp_hash;
10231                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
10232                 union tunnel_tbl_key tunnel_key = {
10233                         .tunnel_id = tbl_data->tunnel ?
10234                                         tbl_data->tunnel->tunnel_id : 0,
10235                         .group = tbl_data->group_id
10236                 };
10237                 uint32_t table_level = tbl_data->level;
10238
10239                 tunnel_grp_hash = tbl_data->tunnel ?
10240                                         tbl_data->tunnel->groups :
10241                                         thub->groups;
10242                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
10243                 if (he)
10244                         mlx5_hlist_unregister(tunnel_grp_hash, he);
10245                 DRV_LOG(DEBUG,
10246                         "table_level %u id %u tunnel %u group %u released.",
10247                         table_level,
10248                         tbl_data->id,
10249                         tbl_data->tunnel ?
10250                         tbl_data->tunnel->tunnel_id : 0,
10251                         tbl_data->group_id);
10252         }
10253         mlx5_list_destroy(&tbl_data->matchers);
10254         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
10255 }
10256
10257 /**
10258  * Release a flow table.
10259  *
10260  * @param[in] sh
10261  *   Pointer to device shared structure.
10262  * @param[in] tbl
10263  *   Table resource to be released.
10264  *
10265  * @return
10266  *   Returns 0 if the table was released, 1 otherwise.
10267  */
10268 static int
10269 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
10270                              struct mlx5_flow_tbl_resource *tbl)
10271 {
10272         struct mlx5_flow_tbl_data_entry *tbl_data;
10273
10274         if (!tbl)
10275                 return 0;
10276         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10277         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
10278 }
10279
10280 int
10281 flow_dv_matcher_match_cb(struct mlx5_list *list __rte_unused,
10282                          struct mlx5_list_entry *entry, void *cb_ctx)
10283 {
10284         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10285         struct mlx5_flow_dv_matcher *ref = ctx->data;
10286         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
10287                                                         entry);
10288
10289         return cur->crc != ref->crc ||
10290                cur->priority != ref->priority ||
10291                memcmp((const void *)cur->mask.buf,
10292                       (const void *)ref->mask.buf, ref->mask.size);
10293 }
10294
10295 struct mlx5_list_entry *
10296 flow_dv_matcher_create_cb(struct mlx5_list *list,
10297                           struct mlx5_list_entry *entry __rte_unused,
10298                           void *cb_ctx)
10299 {
10300         struct mlx5_dev_ctx_shared *sh = list->ctx;
10301         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10302         struct mlx5_flow_dv_matcher *ref = ctx->data;
10303         struct mlx5_flow_dv_matcher *resource;
10304         struct mlx5dv_flow_matcher_attr dv_attr = {
10305                 .type = IBV_FLOW_ATTR_NORMAL,
10306                 .match_mask = (void *)&ref->mask,
10307         };
10308         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
10309                                                             typeof(*tbl), tbl);
10310         int ret;
10311
10312         resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
10313                                SOCKET_ID_ANY);
10314         if (!resource) {
10315                 rte_flow_error_set(ctx->error, ENOMEM,
10316                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10317                                    "cannot create matcher");
10318                 return NULL;
10319         }
10320         *resource = *ref;
10321         dv_attr.match_criteria_enable =
10322                 flow_dv_matcher_enable(resource->mask.buf);
10323         __flow_dv_adjust_buf_size(&ref->mask.size,
10324                                   dv_attr.match_criteria_enable);
10325         dv_attr.priority = ref->priority;
10326         if (tbl->is_egress)
10327                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
10328         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
10329                                                &resource->matcher_object);
10330         if (ret) {
10331                 mlx5_free(resource);
10332                 rte_flow_error_set(ctx->error, ENOMEM,
10333                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10334                                    "cannot create matcher");
10335                 return NULL;
10336         }
10337         return &resource->entry;
10338 }
10339
10340 /**
10341  * Register the flow matcher.
10342  *
10343  * @param[in, out] dev
10344  *   Pointer to rte_eth_dev structure.
10345  * @param[in, out] matcher
10346  *   Pointer to flow matcher.
10347  * @param[in, out] key
10348  *   Pointer to flow table key.
10349  * @param[in, out] dev_flow
10350  *   Pointer to the dev_flow.
10351  * @param[out] error
10352  *   pointer to error structure.
10353  *
10354  * @return
10355  *   0 on success, a negative errno value otherwise and rte_errno is set.
10356  */
10357 static int
10358 flow_dv_matcher_register(struct rte_eth_dev *dev,
10359                          struct mlx5_flow_dv_matcher *ref,
10360                          union mlx5_flow_tbl_key *key,
10361                          struct mlx5_flow *dev_flow,
10362                          const struct mlx5_flow_tunnel *tunnel,
10363                          uint32_t group_id,
10364                          struct rte_flow_error *error)
10365 {
10366         struct mlx5_list_entry *entry;
10367         struct mlx5_flow_dv_matcher *resource;
10368         struct mlx5_flow_tbl_resource *tbl;
10369         struct mlx5_flow_tbl_data_entry *tbl_data;
10370         struct mlx5_flow_cb_ctx ctx = {
10371                 .error = error,
10372                 .data = ref,
10373         };
10374         /*
10375          * Tunnel offload API requires this registration for cases when
10376          * a tunnel match rule was inserted before the tunnel set rule.
10377          */
10378         tbl = flow_dv_tbl_resource_get(dev, key->level,
10379                                        key->is_egress, key->is_fdb,
10380                                        dev_flow->external, tunnel,
10381                                        group_id, 0, key->id, error);
10382         if (!tbl)
10383                 return -rte_errno;      /* No need to refill the error info */
10384         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
10385         ref->tbl = tbl;
10386         entry = mlx5_list_register(&tbl_data->matchers, &ctx);
10387         if (!entry) {
10388                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10389                 return rte_flow_error_set(error, ENOMEM,
10390                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10391                                           "cannot allocate ref memory");
10392         }
10393         resource = container_of(entry, typeof(*resource), entry);
10394         dev_flow->handle->dvh.matcher = resource;
10395         return 0;
10396 }
10397
10398 struct mlx5_hlist_entry *
10399 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
10400 {
10401         struct mlx5_dev_ctx_shared *sh = list->ctx;
10402         struct rte_flow_error *error = ctx;
10403         struct mlx5_flow_dv_tag_resource *entry;
10404         uint32_t idx = 0;
10405         int ret;
10406
10407         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
10408         if (!entry) {
10409                 rte_flow_error_set(error, ENOMEM,
10410                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10411                                    "cannot allocate resource memory");
10412                 return NULL;
10413         }
10414         entry->idx = idx;
10415         entry->tag_id = key;
10416         ret = mlx5_flow_os_create_flow_action_tag(key,
10417                                                   &entry->action);
10418         if (ret) {
10419                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
10420                 rte_flow_error_set(error, ENOMEM,
10421                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10422                                    NULL, "cannot create action");
10423                 return NULL;
10424         }
10425         return &entry->entry;
10426 }
10427
10428 int
10429 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
10430                      struct mlx5_hlist_entry *entry, uint64_t key,
10431                      void *cb_ctx __rte_unused)
10432 {
10433         struct mlx5_flow_dv_tag_resource *tag =
10434                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10435
10436         return key != tag->tag_id;
10437 }
10438
10439 /**
10440  * Find existing tag resource or create and register a new one.
10441  *
10442  * @param[in, out] dev
10443  *   Pointer to rte_eth_dev structure.
10444  * @param[in] tag_be24
10445  *   Tag value in big endian, then right-shifted by 8 bits.
10446  * @param[in, out] dev_flow
10447  *   Pointer to the dev_flow.
10448  * @param[out] error
10449  *   pointer to error structure.
10450  *
10451  * @return
10452  *   0 on success, a negative errno value otherwise and rte_errno is set.
10453  */
10454 static int
10455 flow_dv_tag_resource_register
10456                         (struct rte_eth_dev *dev,
10457                          uint32_t tag_be24,
10458                          struct mlx5_flow *dev_flow,
10459                          struct rte_flow_error *error)
10460 {
10461         struct mlx5_priv *priv = dev->data->dev_private;
10462         struct mlx5_flow_dv_tag_resource *resource;
10463         struct mlx5_hlist_entry *entry;
10464
10465         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
10466         if (entry) {
10467                 resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
10468                                         entry);
10469                 dev_flow->handle->dvh.rix_tag = resource->idx;
10470                 dev_flow->dv.tag_resource = resource;
10471                 return 0;
10472         }
10473         return -rte_errno;
10474 }
10475
10476 void
10477 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
10478                       struct mlx5_hlist_entry *entry)
10479 {
10480         struct mlx5_dev_ctx_shared *sh = list->ctx;
10481         struct mlx5_flow_dv_tag_resource *tag =
10482                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
10483
10484         MLX5_ASSERT(tag && sh && tag->action);
10485         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
10486         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
10487         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
10488 }
10489
10490 /**
10491  * Release the tag.
10492  *
10493  * @param dev
10494  *   Pointer to Ethernet device.
10495  * @param tag_idx
10496  *   Tag index.
10497  *
10498  * @return
10499  *   1 while a reference on it exists, 0 when freed.
10500  */
10501 static int
10502 flow_dv_tag_release(struct rte_eth_dev *dev,
10503                     uint32_t tag_idx)
10504 {
10505         struct mlx5_priv *priv = dev->data->dev_private;
10506         struct mlx5_flow_dv_tag_resource *tag;
10507
10508         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
10509         if (!tag)
10510                 return 0;
10511         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
10512                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
10513         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
10514 }
10515
10516 /**
10517  * Translate port ID action to vport.
10518  *
10519  * @param[in] dev
10520  *   Pointer to rte_eth_dev structure.
10521  * @param[in] action
10522  *   Pointer to the port ID action.
10523  * @param[out] dst_port_id
10524  *   The target port ID.
10525  * @param[out] error
10526  *   Pointer to the error structure.
10527  *
10528  * @return
10529  *   0 on success, a negative errno value otherwise and rte_errno is set.
10530  */
10531 static int
10532 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
10533                                  const struct rte_flow_action *action,
10534                                  uint32_t *dst_port_id,
10535                                  struct rte_flow_error *error)
10536 {
10537         uint32_t port;
10538         struct mlx5_priv *priv;
10539         const struct rte_flow_action_port_id *conf =
10540                         (const struct rte_flow_action_port_id *)action->conf;
10541
10542         port = conf->original ? dev->data->port_id : conf->id;
10543         priv = mlx5_port_to_eswitch_info(port, false);
10544         if (!priv)
10545                 return rte_flow_error_set(error, rte_errno,
10546                                           RTE_FLOW_ERROR_TYPE_ACTION,
10547                                           NULL,
10548                                           "No eswitch info was found for port");
10549 #ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
10550         /*
10551          * This parameter is transferred to
10552          * mlx5dv_dr_action_create_dest_ib_port().
10553          */
10554         *dst_port_id = priv->dev_port;
10555 #else
10556         /*
10557          * Legacy mode, no LAG configuration is supported.
10558          * This parameter is transferred to
10559          * mlx5dv_dr_action_create_dest_vport().
10560          */
10561         *dst_port_id = priv->vport_id;
10562 #endif
10563         return 0;
10564 }
10565
10566 /**
10567  * Create a counter with aging configuration.
10568  *
10569  * @param[in] dev
10570  *   Pointer to rte_eth_dev structure.
10571  * @param[in] dev_flow
10572  *   Pointer to the mlx5_flow.
10573  * @param[out] count
10574  *   Pointer to the counter action configuration.
10575  * @param[in] age
10576  *   Pointer to the aging action configuration.
10577  *
10578  * @return
10579  *   Index to flow counter on success, 0 otherwise.
10580  */
10581 static uint32_t
10582 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
10583                                 struct mlx5_flow *dev_flow,
10584                                 const struct rte_flow_action_count *count,
10585                                 const struct rte_flow_action_age *age)
10586 {
10587         uint32_t counter;
10588         struct mlx5_age_param *age_param;
10589
10590         if (count && count->shared)
10591                 counter = flow_dv_counter_get_shared(dev, count->id);
10592         else
10593                 counter = flow_dv_counter_alloc(dev, !!age);
10594         if (!counter || age == NULL)
10595                 return counter;
10596         age_param = flow_dv_counter_idx_get_age(dev, counter);
10597         age_param->context = age->context ? age->context :
10598                 (void *)(uintptr_t)(dev_flow->flow_idx);
10599         age_param->timeout = age->timeout;
10600         age_param->port_id = dev->data->port_id;
10601         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
10602         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
10603         return counter;
10604 }
10605
10606 /**
10607  * Add Tx queue matcher.
10608  *
10609  * @param[in] dev
10610  *   Pointer to the dev struct.
10611  * @param[in, out] matcher
10612  *   Flow matcher.
10613  * @param[in, out] key
10614  *   Flow matcher value.
10615  * @param[in] item
10616  *   Flow pattern to translate.
10619  */
10620 static void
10621 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
10622                                 void *matcher, void *key,
10623                                 const struct rte_flow_item *item)
10624 {
10625         const struct mlx5_rte_flow_item_tx_queue *queue_m;
10626         const struct mlx5_rte_flow_item_tx_queue *queue_v;
10627         void *misc_m =
10628                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
10629         void *misc_v =
10630                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
10631         struct mlx5_txq_ctrl *txq;
10632         uint32_t queue;
10633
10635         queue_m = (const void *)item->mask;
10636         if (!queue_m)
10637                 return;
10638         queue_v = (const void *)item->spec;
10639         if (!queue_v)
10640                 return;
10641         txq = mlx5_txq_get(dev, queue_v->queue);
10642         if (!txq)
10643                 return;
10644         queue = txq->obj->sq->id;
10645         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
10646         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
10647                  queue & queue_m->queue);
10648         mlx5_txq_release(dev, queue_v->queue);
10649 }
10650
10651 /**
10652  * Set the hash fields according to the @p flow information.
10653  *
10654  * @param[in] dev_flow
10655  *   Pointer to the mlx5_flow.
10656  * @param[in] rss_desc
10657  *   Pointer to the mlx5_flow_rss_desc.
10658  */
10659 static void
10660 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
10661                        struct mlx5_flow_rss_desc *rss_desc)
10662 {
10663         uint64_t items = dev_flow->handle->layers;
10664         int rss_inner = 0;
10665         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
10666
10667         dev_flow->hash_fields = 0;
10668 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
10669         if (rss_desc->level >= 2) {
10670                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
10671                 rss_inner = 1;
10672         }
10673 #endif
10674         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
10675             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
10676                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
10677                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10678                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
10679                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10680                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
10681                         else
10682                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
10683                 }
10684         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
10685                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
10686                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
10687                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
10688                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
10689                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
10690                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
10691                         else
10692                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
10693                 }
10694         }
10695         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
10696             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
10697                 if (rss_types & ETH_RSS_UDP) {
10698                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10699                                 dev_flow->hash_fields |=
10700                                                 IBV_RX_HASH_SRC_PORT_UDP;
10701                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10702                                 dev_flow->hash_fields |=
10703                                                 IBV_RX_HASH_DST_PORT_UDP;
10704                         else
10705                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
10706                 }
10707         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
10708                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
10709                 if (rss_types & ETH_RSS_TCP) {
10710                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
10711                                 dev_flow->hash_fields |=
10712                                                 IBV_RX_HASH_SRC_PORT_TCP;
10713                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
10714                                 dev_flow->hash_fields |=
10715                                                 IBV_RX_HASH_DST_PORT_TCP;
10716                         else
10717                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
10718                 }
10719         }
10720 }
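
/*
 * Illustrative sketch, not part of the upstream driver: the IPv4 leg of
 * the selection done by flow_dv_hashfields_set() above, isolated as a
 * hypothetical helper. The layer bits choose the IPv4 branch; the RSS
 * types then either narrow hashing to one address with the
 * ETH_RSS_L3_*_ONLY modifiers or keep both source and destination.
 */
static inline uint64_t
example_ipv4_hash_fields(uint64_t rss_types)
{
	if (!(rss_types & MLX5_IPV4_LAYER_TYPES))
		return 0; /* No IPv4 hashing requested at all. */
	if (rss_types & ETH_RSS_L3_SRC_ONLY)
		return IBV_RX_HASH_SRC_IPV4;
	if (rss_types & ETH_RSS_L3_DST_ONLY)
		return IBV_RX_HASH_DST_IPV4;
	return MLX5_IPV4_IBV_RX_HASH; /* Both source and destination. */
}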
10721
10722 /**
10723  * Prepare an Rx Hash queue.
10724  *
10725  * @param dev
10726  *   Pointer to Ethernet device.
10727  * @param[in] dev_flow
10728  *   Pointer to the mlx5_flow.
10729  * @param[in] rss_desc
10730  *   Pointer to the mlx5_flow_rss_desc.
10731  * @param[out] hrxq_idx
10732  *   Hash Rx queue index.
10733  *
10734  * @return
10735  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
10736  */
10737 static struct mlx5_hrxq *
10738 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
10739                      struct mlx5_flow *dev_flow,
10740                      struct mlx5_flow_rss_desc *rss_desc,
10741                      uint32_t *hrxq_idx)
10742 {
10743         struct mlx5_priv *priv = dev->data->dev_private;
10744         struct mlx5_flow_handle *dh = dev_flow->handle;
10745         struct mlx5_hrxq *hrxq;
10746
10747         MLX5_ASSERT(rss_desc->queue_num);
10748         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
10749         rss_desc->hash_fields = dev_flow->hash_fields;
10750         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
10751         rss_desc->shared_rss = 0;
10752         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
10753         if (!*hrxq_idx)
10754                 return NULL;
10755         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10756                               *hrxq_idx);
10757         return hrxq;
10758 }
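
/*
 * Illustrative usage sketch, not part of the upstream driver: the hash
 * fields must already be set on the dev_flow before calling
 * flow_dv_hrxq_prepare(). On success both the hrxq pointer and its
 * ipool index are returned; the flow handle stores the index while the
 * DR action is taken from the pointer:
 *
 *	uint32_t hrxq_idx;
 *	struct mlx5_hrxq *hrxq;
 *
 *	flow_dv_hashfields_set(dev_flow, rss_desc);
 *	hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
 *	if (!hrxq)
 *		return NULL; (rte_errno is expected to be set already)
 */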
10759
10760 /**
10761  * Release sample sub action resource.
10762  *
10763  * @param[in, out] dev
10764  *   Pointer to rte_eth_dev structure.
10765  * @param[in] act_res
10766  *   Pointer to sample sub action resource.
10767  */
10768 static void
10769 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
10770                                    struct mlx5_flow_sub_actions_idx *act_res)
10771 {
10772         if (act_res->rix_hrxq) {
10773                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
10774                 act_res->rix_hrxq = 0;
10775         }
10776         if (act_res->rix_encap_decap) {
10777                 flow_dv_encap_decap_resource_release(dev,
10778                                                      act_res->rix_encap_decap);
10779                 act_res->rix_encap_decap = 0;
10780         }
10781         if (act_res->rix_port_id_action) {
10782                 flow_dv_port_id_action_resource_release(dev,
10783                                                 act_res->rix_port_id_action);
10784                 act_res->rix_port_id_action = 0;
10785         }
10786         if (act_res->rix_tag) {
10787                 flow_dv_tag_release(dev, act_res->rix_tag);
10788                 act_res->rix_tag = 0;
10789         }
10790         if (act_res->rix_jump) {
10791                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
10792                 act_res->rix_jump = 0;
10793         }
10794 }
10795
10796 int
10797 flow_dv_sample_match_cb(struct mlx5_list *list __rte_unused,
10798                         struct mlx5_list_entry *entry, void *cb_ctx)
10799 {
10800         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10801         struct rte_eth_dev *dev = ctx->dev;
10802         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
10803         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
10804                                                               typeof(*resource),
10805                                                               entry);
10806
10807         if (ctx_resource->ratio == resource->ratio &&
10808             ctx_resource->ft_type == resource->ft_type &&
10809             ctx_resource->ft_id == resource->ft_id &&
10810             ctx_resource->set_action == resource->set_action &&
10811             !memcmp((void *)&ctx_resource->sample_act,
10812                     (void *)&resource->sample_act,
10813                     sizeof(struct mlx5_flow_sub_actions_list))) {
10814                 /*
10815                  * Existing sample action should release the prepared
10816                  * sub-actions reference counter.
10817                  */
10818                 flow_dv_sample_sub_actions_release(dev,
10819                                                    &ctx_resource->sample_idx);
10820                 return 0;
10821         }
10822         return 1;
10823 }
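
/*
 * Simplified model, an assumption rather than the actual mlx5_list
 * implementation, of how the registration path is expected to use the
 * match callback above:
 *
 *	for each entry in the list:
 *		if (cb_match(list, entry, cb_ctx) == 0)
 *			reuse entry; (cache hit)
 *	entry = cb_create(list, NULL, cb_ctx); (cache miss)
 *
 * Hence returning 0 means "reuse this entry", and the callback must
 * drop the sub-action references prepared for the would-be new entry,
 * which is exactly what flow_dv_sample_sub_actions_release() does here.
 */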
10824
10825 struct mlx5_list_entry *
10826 flow_dv_sample_create_cb(struct mlx5_list *list __rte_unused,
10827                          struct mlx5_list_entry *entry __rte_unused,
10828                          void *cb_ctx)
10829 {
10830         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10831         struct rte_eth_dev *dev = ctx->dev;
10832         struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
10833         void **sample_dv_actions = ctx_resource->sub_actions;
10834         struct mlx5_flow_dv_sample_resource *resource;
10835         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
10836         struct mlx5_priv *priv = dev->data->dev_private;
10837         struct mlx5_dev_ctx_shared *sh = priv->sh;
10838         struct mlx5_flow_tbl_resource *tbl;
10839         uint32_t idx = 0;
10840         const uint32_t next_ft_step = 1;
10841         uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
10842         uint8_t is_egress = 0;
10843         uint8_t is_transfer = 0;
10844         struct rte_flow_error *error = ctx->error;
10845
10846         /* Register new sample resource. */
10847         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10848         if (!resource) {
10849                 rte_flow_error_set(error, ENOMEM,
10850                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10851                                           NULL,
10852                                           "cannot allocate resource memory");
10853                 return NULL;
10854         }
10855         *resource = *ctx_resource;
10856         /* Create normal path table level */
10857         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
10858                 is_transfer = 1;
10859         else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
10860                 is_egress = 1;
10861         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
10862                                         is_egress, is_transfer,
10863                                         true, NULL, 0, 0, 0, error);
10864         if (!tbl) {
10865                 rte_flow_error_set(error, ENOMEM,
10866                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10867                                           NULL,
10868                                           "fail to create normal path table "
10869                                           "for sample");
10870                 goto error;
10871         }
10872         resource->normal_path_tbl = tbl;
10873         if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10874                 if (!sh->default_miss_action) {
10875                         rte_flow_error_set(error, ENOMEM,
10876                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10877                                                 NULL,
10878                                                 "default miss action was not "
10879                                                 "created");
10880                         goto error;
10881                 }
10882                 sample_dv_actions[ctx_resource->sample_act.actions_num++] =
10883                                                 sh->default_miss_action;
10884         }
10885         /* Create a DR sample action */
10886         sampler_attr.sample_ratio = resource->ratio;
10887         sampler_attr.default_next_table = tbl->obj;
10888         sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
10889         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
10890                                                         &sample_dv_actions[0];
10891         sampler_attr.action = resource->set_action;
10892         if (mlx5_os_flow_dr_create_flow_action_sampler
10893                         (&sampler_attr, &resource->verbs_action)) {
10894                 rte_flow_error_set(error, ENOMEM,
10895                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10896                                         NULL, "cannot create sample action");
10897                 goto error;
10898         }
10899         resource->idx = idx;
10900         resource->dev = dev;
10901         return &resource->entry;
10902 error:
10903         if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
10904                 flow_dv_sample_sub_actions_release(dev,
10905                                                    &resource->sample_idx);
10906         if (resource->normal_path_tbl)
10907                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10908                                 resource->normal_path_tbl);
10909         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10910         return NULL;
10912 }
10913
10914 struct mlx5_list_entry *
10915 flow_dv_sample_clone_cb(struct mlx5_list *list __rte_unused,
10916                          struct mlx5_list_entry *entry __rte_unused,
10917                          void *cb_ctx)
10918 {
10919         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10920         struct rte_eth_dev *dev = ctx->dev;
10921         struct mlx5_flow_dv_sample_resource *resource;
10922         struct mlx5_priv *priv = dev->data->dev_private;
10923         struct mlx5_dev_ctx_shared *sh = priv->sh;
10924         uint32_t idx = 0;
10925
10926         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
10927         if (!resource) {
10928                 rte_flow_error_set(ctx->error, ENOMEM,
10929                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10930                                           NULL,
10931                                           "cannot allocate resource memory");
10932                 return NULL;
10933         }
10934         memcpy(resource, entry, sizeof(*resource));
10935         resource->idx = idx;
10936         resource->dev = dev;
10937         return &resource->entry;
10938 }
10939
10940 void
10941 flow_dv_sample_clone_free_cb(struct mlx5_list *list __rte_unused,
10942                          struct mlx5_list_entry *entry)
10943 {
10944         struct mlx5_flow_dv_sample_resource *resource =
10945                         container_of(entry, typeof(*resource), entry);
10946         struct rte_eth_dev *dev = resource->dev;
10947         struct mlx5_priv *priv = dev->data->dev_private;
10948
10949         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10950                         resource->idx);
10951 }
10952
10953 /**
10954  * Find existing sample resource or create and register a new one.
10955  *
10956  * @param[in, out] dev
10957  *   Pointer to rte_eth_dev structure.
10958  * @param[in] ref
10959  *   Pointer to sample resource reference.
10960  * @param[in, out] dev_flow
10961  *   Pointer to the dev_flow.
10962  * @param[out] error
10963  *   Pointer to the error structure.
10964  *
10965  * @return
10966  *   0 on success, otherwise a negative errno value and rte_errno is set.
10967  */
10968 static int
10969 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
10970                          struct mlx5_flow_dv_sample_resource *ref,
10971                          struct mlx5_flow *dev_flow,
10972                          struct rte_flow_error *error)
10973 {
10974         struct mlx5_flow_dv_sample_resource *resource;
10975         struct mlx5_list_entry *entry;
10976         struct mlx5_priv *priv = dev->data->dev_private;
10977         struct mlx5_flow_cb_ctx ctx = {
10978                 .dev = dev,
10979                 .error = error,
10980                 .data = ref,
10981         };
10982
10983         entry = mlx5_list_register(&priv->sh->sample_action_list, &ctx);
10984         if (!entry)
10985                 return -rte_errno;
10986         resource = container_of(entry, typeof(*resource), entry);
10987         dev_flow->handle->dvh.rix_sample = resource->idx;
10988         dev_flow->dv.sample_res = resource;
10989         return 0;
10990 }
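
/*
 * Illustrative sketch, not part of the upstream driver: every
 * *_resource_register() helper in this file follows the same shape,
 * captured by this hypothetical generic variant.
 */
static inline struct mlx5_list_entry *
example_resource_register(struct mlx5_list *list, struct rte_eth_dev *dev,
			  void *ref, struct rte_flow_error *error)
{
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = ref, /* Reference compared by the match callback. */
	};

	/* NULL means the create callback failed and rte_errno is set. */
	return mlx5_list_register(list, &ctx);
}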
10991
10992 int
10993 flow_dv_dest_array_match_cb(struct mlx5_list *list __rte_unused,
10994                             struct mlx5_list_entry *entry, void *cb_ctx)
10995 {
10996         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
10997         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
10998         struct rte_eth_dev *dev = ctx->dev;
10999         struct mlx5_flow_dv_dest_array_resource *resource =
11000                         container_of(entry, typeof(*resource), entry);
11001         uint32_t idx = 0;
11002
11003         if (ctx_resource->num_of_dest == resource->num_of_dest &&
11004             ctx_resource->ft_type == resource->ft_type &&
11005             !memcmp((void *)resource->sample_act,
11006                     (void *)ctx_resource->sample_act,
11007                    (ctx_resource->num_of_dest *
11008                    sizeof(struct mlx5_flow_sub_actions_list)))) {
11009                 /*
11010                  * Existing sample action should release the prepared
11011                  * sub-actions reference counter.
11012                  */
11013                 for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11014                         flow_dv_sample_sub_actions_release(dev,
11015                                         &ctx_resource->sample_idx[idx]);
11016                 return 0;
11017         }
11018         return 1;
11019 }
11020
11021 struct mlx5_list_entry *
11022 flow_dv_dest_array_create_cb(struct mlx5_list *list __rte_unused,
11023                          struct mlx5_list_entry *entry __rte_unused,
11024                          void *cb_ctx)
11025 {
11026         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11027         struct rte_eth_dev *dev = ctx->dev;
11028         struct mlx5_flow_dv_dest_array_resource *resource;
11029         struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
11030         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
11031         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
11032         struct mlx5_priv *priv = dev->data->dev_private;
11033         struct mlx5_dev_ctx_shared *sh = priv->sh;
11034         struct mlx5_flow_sub_actions_list *sample_act;
11035         struct mlx5dv_dr_domain *domain;
11036         uint32_t idx = 0, res_idx = 0;
11037         struct rte_flow_error *error = ctx->error;
11038         uint64_t action_flags;
11039         int ret;
11040
11041         /* Register new destination array resource. */
11042         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11043                                             &res_idx);
11044         if (!resource) {
11045                 rte_flow_error_set(error, ENOMEM,
11046                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11047                                           NULL,
11048                                           "cannot allocate resource memory");
11049                 return NULL;
11050         }
11051         *resource = *ctx_resource;
11052         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
11053                 domain = sh->fdb_domain;
11054         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
11055                 domain = sh->rx_domain;
11056         else
11057                 domain = sh->tx_domain;
11058         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11059                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
11060                                  mlx5_malloc(MLX5_MEM_ZERO,
11061                                  sizeof(struct mlx5dv_dr_action_dest_attr),
11062                                  0, SOCKET_ID_ANY);
11063                 if (!dest_attr[idx]) {
11064                         rte_flow_error_set(error, ENOMEM,
11065                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11066                                            NULL,
11067                                            "cannot allocate resource memory");
11068                         goto error;
11069                 }
11070                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
11071                 sample_act = &ctx_resource->sample_act[idx];
11072                 action_flags = sample_act->action_flags;
11073                 switch (action_flags) {
11074                 case MLX5_FLOW_ACTION_QUEUE:
11075                         dest_attr[idx]->dest = sample_act->dr_queue_action;
11076                         break;
11077                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
11078                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
11079                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
11080                         dest_attr[idx]->dest_reformat->reformat =
11081                                         sample_act->dr_encap_action;
11082                         dest_attr[idx]->dest_reformat->dest =
11083                                         sample_act->dr_port_id_action;
11084                         break;
11085                 case MLX5_FLOW_ACTION_PORT_ID:
11086                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
11087                         break;
11088                 case MLX5_FLOW_ACTION_JUMP:
11089                         dest_attr[idx]->dest = sample_act->dr_jump_action;
11090                         break;
11091                 default:
11092                         rte_flow_error_set(error, EINVAL,
11093                                            RTE_FLOW_ERROR_TYPE_ACTION,
11094                                            NULL,
11095                                            "unsupported actions type");
11096                         goto error;
11097                 }
11098         }
11099         /* Create a dest array action. */
11100         ret = mlx5_os_flow_dr_create_flow_action_dest_array
11101                                                 (domain,
11102                                                  resource->num_of_dest,
11103                                                  dest_attr,
11104                                                  &resource->action);
11105         if (ret) {
11106                 rte_flow_error_set(error, ENOMEM,
11107                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11108                                    NULL,
11109                                    "cannot create destination array action");
11110                 goto error;
11111         }
11112         resource->idx = res_idx;
11113         resource->dev = dev;
11114         for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
11115                 mlx5_free(dest_attr[idx]);
11116         return &resource->entry;
11117 error:
11118         for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
11119                 flow_dv_sample_sub_actions_release(dev,
11120                                                    &resource->sample_idx[idx]);
11121                 if (dest_attr[idx])
11122                         mlx5_free(dest_attr[idx]);
11123         }
11124         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
11125         return NULL;
11126 }
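
/*
 * Illustrative summary, not part of the upstream driver: mapping of the
 * per-destination action_flags to DR destination attributes used above.
 *
 *	QUEUE           -> MLX5DV_DR_ACTION_DEST (hrxq action)
 *	PORT_ID         -> MLX5DV_DR_ACTION_DEST (port id action)
 *	PORT_ID | ENCAP -> MLX5DV_DR_ACTION_DEST_REFORMAT
 *	                   (reformat first, then forward to the port)
 *	JUMP            -> MLX5DV_DR_ACTION_DEST (jump action)
 */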
11127
11128 struct mlx5_list_entry *
11129 flow_dv_dest_array_clone_cb(struct mlx5_list *list __rte_unused,
11130                          struct mlx5_list_entry *entry __rte_unused,
11131                          void *cb_ctx)
11132 {
11133         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
11134         struct rte_eth_dev *dev = ctx->dev;
11135         struct mlx5_flow_dv_dest_array_resource *resource;
11136         struct mlx5_priv *priv = dev->data->dev_private;
11137         struct mlx5_dev_ctx_shared *sh = priv->sh;
11138         uint32_t res_idx = 0;
11139         struct rte_flow_error *error = ctx->error;
11140
11141         resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11142                                       &res_idx);
11143         if (!resource) {
11144                 rte_flow_error_set(error, ENOMEM,
11145                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11146                                           NULL,
11147                                           "cannot allocate dest-array memory");
11148                 return NULL;
11149         }
11150         memcpy(resource, entry, sizeof(*resource));
11151         resource->idx = res_idx;
11152         resource->dev = dev;
11153         return &resource->entry;
11154 }
11155
11156 void
11157 flow_dv_dest_array_clone_free_cb(struct mlx5_list *list __rte_unused,
11158                              struct mlx5_list_entry *entry)
11159 {
11160         struct mlx5_flow_dv_dest_array_resource *resource =
11161                         container_of(entry, typeof(*resource), entry);
11162         struct rte_eth_dev *dev = resource->dev;
11163         struct mlx5_priv *priv = dev->data->dev_private;
11164
11165         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
11166 }
11167
11168 /**
11169  * Find existing destination array resource or create and register a new one.
11170  *
11171  * @param[in, out] dev
11172  *   Pointer to rte_eth_dev structure.
11173  * @param[in] ref
11174  *   Pointer to destination array resource reference.
11175  * @param[in, out] dev_flow
11176  *   Pointer to the dev_flow.
11177  * @param[out] error
11178  *   Pointer to the error structure.
11179  *
11180  * @return
11181  *   0 on success, otherwise a negative errno value and rte_errno is set.
11182  */
11183 static int
11184 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
11185                          struct mlx5_flow_dv_dest_array_resource *ref,
11186                          struct mlx5_flow *dev_flow,
11187                          struct rte_flow_error *error)
11188 {
11189         struct mlx5_flow_dv_dest_array_resource *resource;
11190         struct mlx5_priv *priv = dev->data->dev_private;
11191         struct mlx5_list_entry *entry;
11192         struct mlx5_flow_cb_ctx ctx = {
11193                 .dev = dev,
11194                 .error = error,
11195                 .data = ref,
11196         };
11197
11198         entry = mlx5_list_register(&priv->sh->dest_array_list, &ctx);
11199         if (!entry)
11200                 return -rte_errno;
11201         resource = container_of(entry, typeof(*resource), entry);
11202         dev_flow->handle->dvh.rix_dest_array = resource->idx;
11203         dev_flow->dv.dest_array_res = resource;
11204         return 0;
11205 }
11206
11207 /**
11208  * Convert Sample action to DV specification.
11209  *
11210  * @param[in] dev
11211  *   Pointer to rte_eth_dev structure.
11212  * @param[in] action
11213  *   Pointer to sample action structure.
11214  * @param[in, out] dev_flow
11215  *   Pointer to the mlx5_flow.
11216  * @param[in] attr
11217  *   Pointer to the flow attributes.
11218  * @param[in, out] num_of_dest
11219  *   Pointer to the num of destination.
11220  * @param[in, out] sample_actions
11221  *   Pointer to sample actions list.
11222  * @param[in, out] res
11223  *   Pointer to sample resource.
11224  * @param[out] error
11225  *   Pointer to the error structure.
11226  *
11227  * @return
11228  *   0 on success, a negative errno value otherwise and rte_errno is set.
11229  */
11230 static int
11231 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
11232                                 const struct rte_flow_action_sample *action,
11233                                 struct mlx5_flow *dev_flow,
11234                                 const struct rte_flow_attr *attr,
11235                                 uint32_t *num_of_dest,
11236                                 void **sample_actions,
11237                                 struct mlx5_flow_dv_sample_resource *res,
11238                                 struct rte_flow_error *error)
11239 {
11240         struct mlx5_priv *priv = dev->data->dev_private;
11241         const struct rte_flow_action *sub_actions;
11242         struct mlx5_flow_sub_actions_list *sample_act;
11243         struct mlx5_flow_sub_actions_idx *sample_idx;
11244         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11245         struct rte_flow *flow = dev_flow->flow;
11246         struct mlx5_flow_rss_desc *rss_desc;
11247         uint64_t action_flags = 0;
11248
11249         MLX5_ASSERT(wks);
11250         rss_desc = &wks->rss_desc;
11251         sample_act = &res->sample_act;
11252         sample_idx = &res->sample_idx;
11253         res->ratio = action->ratio;
11254         sub_actions = action->actions;
11255         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
11256                 int type = sub_actions->type;
11257                 uint32_t pre_rix = 0;
11258                 void *pre_r;
11259                 switch (type) {
11260                 case RTE_FLOW_ACTION_TYPE_QUEUE:
11261                 {
11262                         const struct rte_flow_action_queue *queue;
11263                         struct mlx5_hrxq *hrxq;
11264                         uint32_t hrxq_idx;
11265
11266                         queue = sub_actions->conf;
11267                         rss_desc->queue_num = 1;
11268                         rss_desc->queue[0] = queue->index;
11269                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11270                                                     rss_desc, &hrxq_idx);
11271                         if (!hrxq)
11272                                 return rte_flow_error_set
11273                                         (error, rte_errno,
11274                                          RTE_FLOW_ERROR_TYPE_ACTION,
11275                                          NULL,
11276                                          "cannot create fate queue");
11277                         sample_act->dr_queue_action = hrxq->action;
11278                         sample_idx->rix_hrxq = hrxq_idx;
11279                         sample_actions[sample_act->actions_num++] =
11280                                                 hrxq->action;
11281                         (*num_of_dest)++;
11282                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
11283                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11284                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11285                         dev_flow->handle->fate_action =
11286                                         MLX5_FLOW_FATE_QUEUE;
11287                         break;
11288                 }
11289                 case RTE_FLOW_ACTION_TYPE_RSS:
11290                 {
11291                         struct mlx5_hrxq *hrxq;
11292                         uint32_t hrxq_idx;
11293                         const struct rte_flow_action_rss *rss;
11294                         const uint8_t *rss_key;
11295
11296                         rss = sub_actions->conf;
11297                         memcpy(rss_desc->queue, rss->queue,
11298                                rss->queue_num * sizeof(uint16_t));
11299                         rss_desc->queue_num = rss->queue_num;
11300                         /* NULL RSS key indicates default RSS key. */
11301                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11302                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11303                         /*
11304                          * rss->level and rss->types should be set in advance
11305                          * when expanding items for RSS.
11306                          */
11307                         flow_dv_hashfields_set(dev_flow, rss_desc);
11308                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11309                                                     rss_desc, &hrxq_idx);
11310                         if (!hrxq)
11311                                 return rte_flow_error_set
11312                                         (error, rte_errno,
11313                                          RTE_FLOW_ERROR_TYPE_ACTION,
11314                                          NULL,
11315                                          "cannot create fate queue");
11316                         sample_act->dr_queue_action = hrxq->action;
11317                         sample_idx->rix_hrxq = hrxq_idx;
11318                         sample_actions[sample_act->actions_num++] =
11319                                                 hrxq->action;
11320                         (*num_of_dest)++;
11321                         action_flags |= MLX5_FLOW_ACTION_RSS;
11322                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11323                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11324                         dev_flow->handle->fate_action =
11325                                         MLX5_FLOW_FATE_QUEUE;
11326                         break;
11327                 }
11328                 case RTE_FLOW_ACTION_TYPE_MARK:
11329                 {
11330                         uint32_t tag_be = mlx5_flow_mark_set
11331                                 (((const struct rte_flow_action_mark *)
11332                                 (sub_actions->conf))->id);
11333
11334                         dev_flow->handle->mark = 1;
11335                         pre_rix = dev_flow->handle->dvh.rix_tag;
11336                         /* Save the mark resource before sample */
11337                         pre_r = dev_flow->dv.tag_resource;
11338                         if (flow_dv_tag_resource_register(dev, tag_be,
11339                                                   dev_flow, error))
11340                                 return -rte_errno;
11341                         MLX5_ASSERT(dev_flow->dv.tag_resource);
11342                         sample_act->dr_tag_action =
11343                                 dev_flow->dv.tag_resource->action;
11344                         sample_idx->rix_tag =
11345                                 dev_flow->handle->dvh.rix_tag;
11346                         sample_actions[sample_act->actions_num++] =
11347                                                 sample_act->dr_tag_action;
11348                         /* Recover the mark resource after sample */
11349                         dev_flow->dv.tag_resource = pre_r;
11350                         dev_flow->handle->dvh.rix_tag = pre_rix;
11351                         action_flags |= MLX5_FLOW_ACTION_MARK;
11352                         break;
11353                 }
11354                 case RTE_FLOW_ACTION_TYPE_COUNT:
11355                 {
11356                         if (!flow->counter) {
11357                                 flow->counter =
11358                                         flow_dv_translate_create_counter(dev,
11359                                                 dev_flow, sub_actions->conf,
11360                                                 0);
11361                                 if (!flow->counter)
11362                                         return rte_flow_error_set
11363                                                 (error, rte_errno,
11364                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11365                                                 NULL,
11366                                                 "cannot create counter"
11367                                                 " object.");
11368                         }
11369                         sample_act->dr_cnt_action =
11370                                   (flow_dv_counter_get_by_idx(dev,
11371                                   flow->counter, NULL))->action;
11372                         sample_actions[sample_act->actions_num++] =
11373                                                 sample_act->dr_cnt_action;
11374                         action_flags |= MLX5_FLOW_ACTION_COUNT;
11375                         break;
11376                 }
11377                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
11378                 {
11379                         struct mlx5_flow_dv_port_id_action_resource
11380                                         port_id_resource;
11381                         uint32_t port_id = 0;
11382
11383                         memset(&port_id_resource, 0, sizeof(port_id_resource));
11384                         /* Save the port id resource before sample */
11385                         pre_rix = dev_flow->handle->rix_port_id_action;
11386                         pre_r = dev_flow->dv.port_id_action;
11387                         if (flow_dv_translate_action_port_id(dev, sub_actions,
11388                                                              &port_id, error))
11389                                 return -rte_errno;
11390                         port_id_resource.port_id = port_id;
11391                         if (flow_dv_port_id_action_resource_register
11392                             (dev, &port_id_resource, dev_flow, error))
11393                                 return -rte_errno;
11394                         sample_act->dr_port_id_action =
11395                                 dev_flow->dv.port_id_action->action;
11396                         sample_idx->rix_port_id_action =
11397                                 dev_flow->handle->rix_port_id_action;
11398                         sample_actions[sample_act->actions_num++] =
11399                                                 sample_act->dr_port_id_action;
11400                         /* Recover the port id resource after sample */
11401                         dev_flow->dv.port_id_action = pre_r;
11402                         dev_flow->handle->rix_port_id_action = pre_rix;
11403                         (*num_of_dest)++;
11404                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
11405                         break;
11406                 }
11407                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11408                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11409                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11410                         /* Save the encap resource before sample */
11411                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
11412                         pre_r = dev_flow->dv.encap_decap;
11413                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
11414                                                            dev_flow,
11415                                                            attr->transfer,
11416                                                            error))
11417                                 return -rte_errno;
11418                         sample_act->dr_encap_action =
11419                                 dev_flow->dv.encap_decap->action;
11420                         sample_idx->rix_encap_decap =
11421                                 dev_flow->handle->dvh.rix_encap_decap;
11422                         sample_actions[sample_act->actions_num++] =
11423                                                 sample_act->dr_encap_action;
11424                         /* Recover the encap resource after sample */
11425                         dev_flow->dv.encap_decap = pre_r;
11426                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
11427                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
11428                         break;
11429                 default:
11430                         return rte_flow_error_set(error, EINVAL,
11431                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11432                                 NULL,
11433                                 "unsupported action for sampler");
11434                 }
11435         }
11436         sample_act->action_flags = action_flags;
11437         res->ft_id = dev_flow->dv.group;
11438         if (attr->transfer) {
11439                 union {
11440                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
11441                         uint64_t set_action;
11442                 } action_ctx = { .set_action = 0 };
11443
11444                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
11445                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
11446                          MLX5_MODIFICATION_TYPE_SET);
11447                 MLX5_SET(set_action_in, action_ctx.action_in, field,
11448                          MLX5_MODI_META_REG_C_0);
11449                 MLX5_SET(set_action_in, action_ctx.action_in, data,
11450                          priv->vport_meta_tag);
11451                 res->set_action = action_ctx.set_action;
11452         } else if (attr->ingress) {
11453                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
11454         } else {
11455                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
11456         }
11457         return 0;
11458 }
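
/*
 * Illustrative note, not part of the upstream driver: the 64-bit
 * set_action built above for the FDB case is expected to be a single
 * PRM "set_action_in" entry that rewrites register C0 with the vport
 * metadata tag, so sampled/mirrored traffic can still be attributed to
 * the originating port:
 *
 *	action_type = MLX5_MODIFICATION_TYPE_SET
 *	field       = MLX5_MODI_META_REG_C_0
 *	data        = priv->vport_meta_tag
 */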
11459
11460 /**
11461  * Create the sample action resource: a sampler, or a destination array for mirroring.
11462  *
11463  * @param[in] dev
11464  *   Pointer to rte_eth_dev structure.
11465  * @param[in, out] dev_flow
11466  *   Pointer to the mlx5_flow.
11467  * @param[in] num_of_dest
11468  *   The num of destination.
11469  * @param[in, out] res
11470  *   Pointer to sample resource.
11471  * @param[in, out] mdest_res
11472  *   Pointer to destination array resource.
11473  * @param[in] sample_actions
11474  *   Pointer to sample path actions list.
11475  * @param[in] action_flags
11476  *   Holds the actions detected until now.
11477  * @param[out] error
11478  *   Pointer to the error structure.
11479  *
11480  * @return
11481  *   0 on success, a negative errno value otherwise and rte_errno is set.
11482  */
11483 static int
11484 flow_dv_create_action_sample(struct rte_eth_dev *dev,
11485                              struct mlx5_flow *dev_flow,
11486                              uint32_t num_of_dest,
11487                              struct mlx5_flow_dv_sample_resource *res,
11488                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
11489                              void **sample_actions,
11490                              uint64_t action_flags,
11491                              struct rte_flow_error *error)
11492 {
11493         /* Update the normal path action resource into the last array index. */
11494         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
11495         struct mlx5_flow_sub_actions_list *sample_act =
11496                                         &mdest_res->sample_act[dest_index];
11497         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11498         struct mlx5_flow_rss_desc *rss_desc;
11499         uint32_t normal_idx = 0;
11500         struct mlx5_hrxq *hrxq;
11501         uint32_t hrxq_idx;
11502
11503         MLX5_ASSERT(wks);
11504         rss_desc = &wks->rss_desc;
11505         if (num_of_dest > 1) {
11506                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
11507                         /* Handle QP action for mirroring */
11508                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
11509                                                     rss_desc, &hrxq_idx);
11510                         if (!hrxq)
11511                                 return rte_flow_error_set
11512                                      (error, rte_errno,
11513                                       RTE_FLOW_ERROR_TYPE_ACTION,
11514                                       NULL,
11515                                       "cannot create rx queue");
11516                         normal_idx++;
11517                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
11518                         sample_act->dr_queue_action = hrxq->action;
11519                         if (action_flags & MLX5_FLOW_ACTION_MARK)
11520                                 dev_flow->handle->rix_hrxq = hrxq_idx;
11521                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
11522                 }
11523                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
11524                         normal_idx++;
11525                         mdest_res->sample_idx[dest_index].rix_encap_decap =
11526                                 dev_flow->handle->dvh.rix_encap_decap;
11527                         sample_act->dr_encap_action =
11528                                 dev_flow->dv.encap_decap->action;
11529                         dev_flow->handle->dvh.rix_encap_decap = 0;
11530                 }
11531                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
11532                         normal_idx++;
11533                         mdest_res->sample_idx[dest_index].rix_port_id_action =
11534                                 dev_flow->handle->rix_port_id_action;
11535                         sample_act->dr_port_id_action =
11536                                 dev_flow->dv.port_id_action->action;
11537                         dev_flow->handle->rix_port_id_action = 0;
11538                 }
11539                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
11540                         normal_idx++;
11541                         mdest_res->sample_idx[dest_index].rix_jump =
11542                                 dev_flow->handle->rix_jump;
11543                         sample_act->dr_jump_action =
11544                                 dev_flow->dv.jump->action;
11545                         dev_flow->handle->rix_jump = 0;
11546                 }
11547                 sample_act->actions_num = normal_idx;
11548                 /* Update the sample action resource into the first array index. */
11549                 mdest_res->ft_type = res->ft_type;
11550                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
11551                                 sizeof(struct mlx5_flow_sub_actions_idx));
11552                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
11553                                 sizeof(struct mlx5_flow_sub_actions_list));
11554                 mdest_res->num_of_dest = num_of_dest;
11555                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
11556                                                          dev_flow, error))
11557                         return rte_flow_error_set(error, EINVAL,
11558                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11559                                                   NULL, "can't create sample "
11560                                                   "action");
11561         } else {
11562                 res->sub_actions = sample_actions;
11563                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
11564                         return rte_flow_error_set(error, EINVAL,
11565                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11566                                                   NULL,
11567                                                   "can't create sample action");
11568         }
11569         return 0;
11570 }
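
/*
 * Illustrative note, not part of the upstream driver: the layout used
 * above when mirroring (num_of_dest > 1). The sample path sub-actions
 * are copied into index 0 of the destination array resource, while the
 * normal path fate action occupies the last index, so a single dest
 * array action covers both copies of the packet.
 */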
11571
11572 /**
11573  * Remove an ASO age action from age actions list.
11574  *
11575  * @param[in] dev
11576  *   Pointer to the Ethernet device structure.
11577  * @param[in] age
11578  *   Pointer to the aso age action handler.
11579  */
11580 static void
11581 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
11582                                 struct mlx5_aso_age_action *age)
11583 {
11584         struct mlx5_age_info *age_info;
11585         struct mlx5_age_param *age_param = &age->age_params;
11586         struct mlx5_priv *priv = dev->data->dev_private;
11587         uint16_t expected = AGE_CANDIDATE;
11588
11589         age_info = GET_PORT_AGE_INFO(priv);
11590         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
11591                                          AGE_FREE, false, __ATOMIC_RELAXED,
11592                                          __ATOMIC_RELAXED)) {
11593                 /*
11594                  * The lock is needed even in the age timeout case,
11595                  * since the age action may still be in process.
11596                  */
11597                 rte_spinlock_lock(&age_info->aged_sl);
11598                 LIST_REMOVE(age, next);
11599                 rte_spinlock_unlock(&age_info->aged_sl);
11600                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
11601         }
11602 }
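
/*
 * Illustrative sketch, not part of the upstream driver: the
 * compare-and-swap or lock pattern used above, in isolation. The
 * AGE_CANDIDATE -> AGE_FREE transition succeeds lock-free when the
 * action never aged out; any other state means the action may sit on
 * the aged list and must be unlinked under the spinlock first.
 */
static inline void
example_age_state_release(uint16_t *state, rte_spinlock_t *sl)
{
	uint16_t expected = AGE_CANDIDATE;

	if (__atomic_compare_exchange_n(state, &expected, AGE_FREE, false,
					__ATOMIC_RELAXED, __ATOMIC_RELAXED))
		return; /* Fast path: the action was still a candidate. */
	rte_spinlock_lock(sl);
	/* The aged-list unlink would happen here. */
	rte_spinlock_unlock(sl);
	__atomic_store_n(state, AGE_FREE, __ATOMIC_RELAXED);
}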
11603
11604 /**
11605  * Release an ASO age action.
11606  *
11607  * @param[in] dev
11608  *   Pointer to the Ethernet device structure.
11609  * @param[in] age_idx
11610  *   Index of ASO age action to release.
11614  *
11615  * @return
11616  *   0 when age action was removed, otherwise the number of references.
11617  */
11618 static int
11619 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
11620 {
11621         struct mlx5_priv *priv = dev->data->dev_private;
11622         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11623         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
11624         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
11625
11626         if (!ret) {
11627                 flow_dv_aso_age_remove_from_age(dev, age);
11628                 rte_spinlock_lock(&mng->free_sl);
11629                 LIST_INSERT_HEAD(&mng->free, age, next);
11630                 rte_spinlock_unlock(&mng->free_sl);
11631         }
11632         return ret;
11633 }
11634
11635 /**
11636  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
11637  *
11638  * @param[in] dev
11639  *   Pointer to the Ethernet device structure.
11640  *
11641  * @return
11642  *   0 on success, otherwise negative errno value and rte_errno is set.
11643  */
11644 static int
11645 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
11646 {
11647         struct mlx5_priv *priv = dev->data->dev_private;
11648         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11649         void *old_pools = mng->pools;
11650         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
11651         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
11652         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
11653
11654         if (!pools) {
11655                 rte_errno = ENOMEM;
11656                 return -ENOMEM;
11657         }
11658         if (old_pools) {
11659                 memcpy(pools, old_pools,
11660                        mng->n * sizeof(struct mlx5_aso_age_pool *));
11661                 mlx5_free(old_pools);
11662         } else {
11663                 /* First ASO flow hit allocation - starting ASO data-path. */
11664                 int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
11665
11666                 if (ret) {
11667                         mlx5_free(pools);
11668                         return ret;
11669                 }
11670         }
11671         mng->n = resize;
11672         mng->pools = pools;
11673         return 0;
11674 }
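
/*
 * Illustrative note, not part of the upstream driver: the pools array
 * grows by a fixed step instead of doubling, keeping the pointer array
 * overhead bounded at the cost of more (but rare and cheap)
 * reallocations, e.g. n = 0, 64, 128, ... assuming
 * MLX5_CNT_CONTAINER_RESIZE == 64. The very first resize also starts
 * the ASO flow-hit polling, i.e. the aging data-path is only activated
 * once the first AGE action is allocated.
 */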
11675
11676 /**
11677  * Create and initialize a new ASO aging pool.
11678  *
11679  * @param[in] dev
11680  *   Pointer to the Ethernet device structure.
11681  * @param[out] age_free
11682  *   Where to store the pointer to the new age action.
11683  *
11684  * @return
11685  *   The age actions pool pointer and @p age_free is set on success,
11686  *   NULL otherwise and rte_errno is set.
11687  */
11688 static struct mlx5_aso_age_pool *
11689 flow_dv_age_pool_create(struct rte_eth_dev *dev,
11690                         struct mlx5_aso_age_action **age_free)
11691 {
11692         struct mlx5_priv *priv = dev->data->dev_private;
11693         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11694         struct mlx5_aso_age_pool *pool = NULL;
11695         struct mlx5_devx_obj *obj = NULL;
11696         uint32_t i;
11697
11698         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
11699                                                     priv->sh->pdn);
11700         if (!obj) {
11701                 rte_errno = ENODATA;
11702                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
11703                 return NULL;
11704         }
11705         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11706         if (!pool) {
11707                 claim_zero(mlx5_devx_cmd_destroy(obj));
11708                 rte_errno = ENOMEM;
11709                 return NULL;
11710         }
11711         pool->flow_hit_aso_obj = obj;
11712         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
11713         rte_spinlock_lock(&mng->resize_sl);
11714         pool->index = mng->next;
11715         /* Resize pools array if there is no room for the new pool in it. */
11716         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
11717                 claim_zero(mlx5_devx_cmd_destroy(obj));
11718                 mlx5_free(pool);
11719                 rte_spinlock_unlock(&mng->resize_sl);
11720                 return NULL;
11721         }
11722         mng->pools[pool->index] = pool;
11723         mng->next++;
11724         rte_spinlock_unlock(&mng->resize_sl);
11725         /* Assign the first action in the new pool, the rest go to free list. */
11726         *age_free = &pool->actions[0];
11727         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
11728                 pool->actions[i].offset = i;
11729                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
11730         }
11731         return pool;
11732 }
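
/*
 * Illustrative note, not part of the upstream driver: the pool layout
 * assumed by the free-list handling above. Each action records its
 * position in the pool's embedded actions[] array, which is what later
 * lets flow_dv_aso_age_alloc() recover the owning pool from a bare
 * action pointer:
 *
 *	base of actions[] = age_free - age_free->offset
 *	pool              = container_of(base, pool type, actions)
 */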
11733
11734 /**
11735  * Allocate an ASO aging bit.
11736  *
11737  * @param[in] dev
11738  *   Pointer to the Ethernet device structure.
11739  * @param[out] error
11740  *   Pointer to the error structure.
11741  *
11742  * @return
11743  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
11744  */
11745 static uint32_t
11746 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
11747 {
11748         struct mlx5_priv *priv = dev->data->dev_private;
11749         const struct mlx5_aso_age_pool *pool;
11750         struct mlx5_aso_age_action *age_free = NULL;
11751         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
11752
11753         MLX5_ASSERT(mng);
11754         /* Try to get the next free age action bit. */
11755         rte_spinlock_lock(&mng->free_sl);
11756         age_free = LIST_FIRST(&mng->free);
11757         if (age_free) {
11758                 LIST_REMOVE(age_free, next);
11759         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
11760                 rte_spinlock_unlock(&mng->free_sl);
11761                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
11762                                    NULL, "failed to create ASO age pool");
11763                 return 0; /* 0 is an error. */
11764         }
11765         rte_spinlock_unlock(&mng->free_sl);
11766         pool = container_of
11767           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
11768                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
11769                                                                        actions);
11770         if (!age_free->dr_action) {
11771                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
11772                                                  error);
11773
11774                 if (reg_c < 0) {
11775                         rte_flow_error_set(error, rte_errno,
11776                                            RTE_FLOW_ERROR_TYPE_ACTION,
11777                                            NULL, "failed to get reg_c "
11778                                            "for ASO flow hit");
11779                         return 0; /* 0 is an error. */
11780                 }
11781 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
11782                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
11783                                 (priv->sh->rx_domain,
11784                                  pool->flow_hit_aso_obj->obj, age_free->offset,
11785                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
11786                                  (reg_c - REG_C_0));
11787 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
11788                 if (!age_free->dr_action) {
11789                         rte_errno = errno;
11790                         rte_spinlock_lock(&mng->free_sl);
11791                         LIST_INSERT_HEAD(&mng->free, age_free, next);
11792                         rte_spinlock_unlock(&mng->free_sl);
11793                         rte_flow_error_set(error, rte_errno,
11794                                            RTE_FLOW_ERROR_TYPE_ACTION,
11795                                            NULL, "failed to create ASO "
11796                                            "flow hit action");
11797                         return 0; /* 0 is an error. */
11798                 }
11799         }
11800         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
11801         return pool->index | ((age_free->offset + 1) << 16);
11802 }
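
/*
 * Illustrative sketch, not part of the upstream driver: the index
 * returned by flow_dv_aso_age_alloc() packs the pool index into the
 * low 16 bits and the in-pool offset, biased by one so that 0 remains
 * a reserved error value, into the high bits. A hypothetical matching
 * decode, valid only for a non-zero index:
 */
static inline void
example_age_idx_decode(uint32_t age_idx, uint32_t *pool_idx,
		       uint32_t *offset)
{
	*pool_idx = age_idx & 0xffff;  /* pool->index, low 16 bits. */
	*offset = (age_idx >> 16) - 1; /* age_free->offset, biased by 1. */
}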
11803
11804 /**
11805  * Initialize flow ASO age parameters.
11806  *
11807  * @param[in] dev
11808  *   Pointer to rte_eth_dev structure.
11809  * @param[in] age_idx
11810  *   Index of ASO age action.
11811  * @param[in] context
11812  *   Pointer to flow counter age context.
11813  * @param[in] timeout
11814  *   Aging timeout in seconds.
11815  *
11816  */
11817 static void
11818 flow_dv_aso_age_params_init(struct rte_eth_dev *dev,
11819                             uint32_t age_idx,
11820                             void *context,
11821                             uint32_t timeout)
11822 {
11823         struct mlx5_aso_age_action *aso_age;
11824
11825         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
11826         MLX5_ASSERT(aso_age);
11827         aso_age->age_params.context = context;
11828         aso_age->age_params.timeout = timeout;
11829         aso_age->age_params.port_id = dev->data->port_id;
11830         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
11831                          __ATOMIC_RELAXED);
11832         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
11833                          __ATOMIC_RELAXED);
11834 }
11835
11836 static void
11837 flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
11838                                const struct rte_flow_item_integrity *value,
11839                                void *headers_m, void *headers_v)
11840 {
11841         if (mask->l4_ok) {
11842                 /* The application l4_ok filter aggregates all hardware l4
11843                  * filters, therefore hw l4_checksum_ok must be implicitly added here.
11844                  */
11845                 struct rte_flow_item_integrity local_item;
11846
11847                 local_item.l4_csum_ok = 1;
11848                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11849                          local_item.l4_csum_ok);
11850                 if (value->l4_ok) {
11851                         /* An application l4_ok = 1 match sets both hw
11852                          * flags, l4_ok and l4_checksum_ok, to 1.
11853                          */
11854                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11855                                  l4_checksum_ok, local_item.l4_csum_ok);
11856                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
11857                                  mask->l4_ok);
11858                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
11859                                  value->l4_ok);
11860                 } else {
11861                         /* An application l4_ok = 0 match is on the hw
11862                          * l4_checksum_ok = 0 flag only.
11863                          */
11864                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11865                                  l4_checksum_ok, 0);
11866                 }
11867         } else if (mask->l4_csum_ok) {
11868                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
11869                          mask->l4_csum_ok);
11870                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
11871                          value->l4_csum_ok);
11872         }
11873 }
11874
11875 static void
11876 flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
11877                                const struct rte_flow_item_integrity *value,
11878                                void *headers_m, void *headers_v,
11879                                bool is_ipv4)
11880 {
11881         if (mask->l3_ok) {
11882                 /* The application l3_ok filter aggregates all hardware L3 filters,
11883                  * therefore the hw ipv4_checksum_ok bit must be set implicitly here.
11884                  */
11885                 struct rte_flow_item_integrity local_item;
11886
11887                 local_item.ipv4_csum_ok = !!is_ipv4;
11888                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11889                          local_item.ipv4_csum_ok);
11890                 if (value->l3_ok) {
11891                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11892                                  ipv4_checksum_ok, local_item.ipv4_csum_ok);
11893                         MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
11894                                  mask->l3_ok);
11895                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
11896                                  value->l3_ok);
11897                 } else {
11898                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
11899                                  ipv4_checksum_ok, 0);
11900                 }
11901         } else if (mask->ipv4_csum_ok) {
11902                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
11903                          mask->ipv4_csum_ok);
11904                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
11905                          value->ipv4_csum_ok);
11906         }
11907 }
11908
11909 static void
11910 flow_dv_translate_item_integrity(void *matcher, void *key,
11911                                  const struct rte_flow_item *head_item,
11912                                  const struct rte_flow_item *integrity_item)
11913 {
11914         const struct rte_flow_item_integrity *mask = integrity_item->mask;
11915         const struct rte_flow_item_integrity *value = integrity_item->spec;
11916         const struct rte_flow_item *tunnel_item, *end_item, *item;
11917         void *headers_m;
11918         void *headers_v;
11919         uint32_t l3_protocol;
11920
11921         if (!value)
11922                 return;
11923         if (!mask)
11924                 mask = &rte_flow_item_integrity_mask;
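        /* Integrity level 0 and 1 refer to the outermost headers, higher
         * levels to the inner (post-tunnel) headers.
         */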
11925         if (value->level > 1) {
11926                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11927                                          inner_headers);
11928                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
11929         } else {
11930                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
11931                                          outer_headers);
11932                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
11933         }
11934         tunnel_item = mlx5_flow_find_tunnel_item(head_item);
11935         if (value->level > 1) {
11936                 /* The tunnel item was verified during item validation. */
11937                 item = tunnel_item;
11938                 end_item = mlx5_find_end_item(tunnel_item);
11939         } else {
11940                 item = head_item;
11941                 end_item = tunnel_item ? tunnel_item :
11942                            mlx5_find_end_item(integrity_item);
11943         }
11944         l3_protocol = mask->l3_ok ?
11945                       mlx5_flow_locate_proto_l3(&item, end_item) : 0;
11946         flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
11947                                        l3_protocol == RTE_ETHER_TYPE_IPV4);
11948         flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
11949 }
11950
11951 /**
11952  * Prepare a DV flow counter with aging configuration.
11953  * Get the existing counter by index if it exists, create a new one otherwise.
11954  *
11955  * @param[in] dev
11956  *   Pointer to rte_eth_dev structure.
11957  * @param[in] dev_flow
11958  *   Pointer to the mlx5_flow (sub flow).
11959  * @param[in, out] flow
11960  *   Pointer to the parent rte_flow.
11961  * @param[in] count
11962  *   Pointer to the counter action configuration.
11963  * @param[in] age
11964  *   Pointer to the aging action configuration.
11965  * @param[out] error
11966  *   Pointer to the error structure.
11967  *
11968  * @return
11969  *   Pointer to the counter on success, NULL otherwise.
11970  */
11971 static struct mlx5_flow_counter *
11972 flow_dv_prepare_counter(struct rte_eth_dev *dev,
11973                         struct mlx5_flow *dev_flow,
11974                         struct rte_flow *flow,
11975                         const struct rte_flow_action_count *count,
11976                         const struct rte_flow_action_age *age,
11977                         struct rte_flow_error *error)
11978 {
11979         if (!flow->counter) {
11980                 flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
11981                                                                  count, age);
11982                 if (!flow->counter) {
11983                         rte_flow_error_set(error, rte_errno,
11984                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11985                                            "cannot create counter object.");
11986                         return NULL;
11987                 }
11988         }
11989         return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11990 }
11991
11992 /*
11993  * Release an ASO CT action via its owner device.
11994  *
11995  * @param[in] dev
11996  *   Pointer to the Ethernet device structure.
11997  * @param[in] idx
11998  *   Index of ASO CT action to release.
11999  *
12000  * @return
12001  *   0 when the CT action was released, -1 if it is still in use by the ASO SQ, otherwise the remaining number of references.
12002  */
12003 static inline int
12004 flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
12005 {
12006         struct mlx5_priv *priv = dev->data->dev_private;
12007         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12008         uint32_t ret;
12009         struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12010         enum mlx5_aso_ct_state state =
12011                         __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
12012
12013         /* Cannot release when CT is in the ASO SQ. */
12014         if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
12015                 return -1;
12016         ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
12017         if (!ret) {
12018                 if (ct->dr_action_orig) {
12019 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12020                         claim_zero(mlx5_glue->destroy_flow_action
12021                                         (ct->dr_action_orig));
12022 #endif
12023                         ct->dr_action_orig = NULL;
12024                 }
12025                 if (ct->dr_action_rply) {
12026 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12027                         claim_zero(mlx5_glue->destroy_flow_action
12028                                         (ct->dr_action_rply));
12029 #endif
12030                         ct->dr_action_rply = NULL;
12031                 }
12032                 /* Reset the state to free; not needed on the 1st allocation. */
12033                 MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
12034                 rte_spinlock_lock(&mng->ct_sl);
12035                 LIST_INSERT_HEAD(&mng->free_cts, ct, next);
12036                 rte_spinlock_unlock(&mng->ct_sl);
12037         }
12038         return (int)ret;
12039 }
12040
12041 static inline int
12042 flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
12043 {
12044         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
12045         uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
12046         struct rte_eth_dev *owndev = &rte_eth_devices[owner];
12048
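        /* The CT index encodes the owner port id; extract it and release
         * the action on the owner device.
         */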
12049         MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
12050         if (dev->data->dev_started != 1)
12051                 return -1;
12052         return flow_dv_aso_ct_dev_release(owndev, idx);
12053 }
12054
12055 /*
12056  * Resize the ASO CT pools array by 64 pools.
12057  *
12058  * @param[in] dev
12059  *   Pointer to the Ethernet device structure.
12060  *
12061  * @return
12062  *   0 on success, otherwise negative errno value and rte_errno is set.
12063  */
12064 static int
12065 flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
12066 {
12067         struct mlx5_priv *priv = dev->data->dev_private;
12068         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12069         void *old_pools = mng->pools;
12070         /* TODO: replace this magic number with a dedicated macro. */
12071         uint32_t resize = mng->n + 64;
12072         uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
12073         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
12074
12075         if (!pools) {
12076                 rte_errno = ENOMEM;
12077                 return -rte_errno;
12078         }
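        /* Take the write lock so that concurrent readers of the pools array
         * never observe the pointer and the pool count out of sync.
         */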
12079         rte_rwlock_write_lock(&mng->resize_rwl);
12080         /* The ASO SQ/QP was already initialized at startup. */
12081         if (old_pools) {
12082                 /* Realloc could be an alternative choice. */
12083                 rte_memcpy(pools, old_pools,
12084                            mng->n * sizeof(struct mlx5_aso_ct_pool *));
12085                 mlx5_free(old_pools);
12086         }
12087         mng->n = resize;
12088         mng->pools = pools;
12089         rte_rwlock_write_unlock(&mng->resize_rwl);
12090         return 0;
12091 }
12092
12093 /*
12094  * Create and initialize a new ASO CT pool.
12095  *
12096  * @param[in] dev
12097  *   Pointer to the Ethernet device structure.
12098  * @param[out] ct_free
12099  *   Where to put the pointer of a new CT action.
12100  *
12101  * @return
12102  *   The CT actions pool pointer, with @p ct_free set, on success,
12103  *   NULL otherwise and rte_errno is set.
12104  */
12105 static struct mlx5_aso_ct_pool *
12106 flow_dv_ct_pool_create(struct rte_eth_dev *dev,
12107                        struct mlx5_aso_ct_action **ct_free)
12108 {
12109         struct mlx5_priv *priv = dev->data->dev_private;
12110         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12111         struct mlx5_aso_ct_pool *pool = NULL;
12112         struct mlx5_devx_obj *obj = NULL;
12113         uint32_t i;
12114         uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
12115
12116         obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
12117                                                 priv->sh->pdn, log_obj_size);
12118         if (!obj) {
12119                 rte_errno = ENODATA;
12120                 DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
12121                 return NULL;
12122         }
12123         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
12124         if (!pool) {
12125                 rte_errno = ENOMEM;
12126                 claim_zero(mlx5_devx_cmd_destroy(obj));
12127                 return NULL;
12128         }
12129         pool->devx_obj = obj;
12130         pool->index = mng->next;
12131         /* Resize pools array if there is no room for the new pool in it. */
12132         if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
12133                 claim_zero(mlx5_devx_cmd_destroy(obj));
12134                 mlx5_free(pool);
12135                 return NULL;
12136         }
12137         mng->pools[pool->index] = pool;
12138         mng->next++;
12139         /* Assign the first action in the new pool, the rest go to the free list. */
12140         *ct_free = &pool->actions[0];
12141         /* The lock is held by the caller, so the list operation is safe here. */
12142         for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
12143                 /* refcnt is 0 when allocating the memory. */
12144                 pool->actions[i].offset = i;
12145                 LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
12146         }
12147         return pool;
12148 }
12149
12150 /*
12151  * Allocate an ASO CT action from the free list.
12152  *
12153  * @param[in] dev
12154  *   Pointer to the Ethernet device structure.
12155  * @param[out] error
12156  *   Pointer to the error structure.
12157  *
12158  * @return
12159  *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
12160  */
12161 static uint32_t
12162 flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
12163 {
12164         struct mlx5_priv *priv = dev->data->dev_private;
12165         struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
12166         struct mlx5_aso_ct_action *ct = NULL;
12167         struct mlx5_aso_ct_pool *pool;
12168         uint8_t reg_c;
12169         uint32_t ct_idx;
12170
12171         MLX5_ASSERT(mng);
12172         if (!priv->config.devx) {
12173                 rte_errno = ENOTSUP;
12174                 return 0;
12175         }
12176         /* Get a free CT action; if none is available, a new pool is created. */
12177         rte_spinlock_lock(&mng->ct_sl);
12178         ct = LIST_FIRST(&mng->free_cts);
12179         if (ct) {
12180                 LIST_REMOVE(ct, next);
12181         } else if (!flow_dv_ct_pool_create(dev, &ct)) {
12182                 rte_spinlock_unlock(&mng->ct_sl);
12183                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
12184                                    NULL, "failed to create ASO CT pool");
12185                 return 0;
12186         }
12187         rte_spinlock_unlock(&mng->ct_sl);
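        /* Recover the owning pool from the action address: the action is
         * embedded in the pool's actions[] array at its own offset.
         */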
12188         pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
12189         ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
12190         /* 0: inactive, 1: created, 2+: used by flows. */
12191         __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
12192         reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
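        /* The ASO flow action takes the flag register number relative to
         * REG_C_0, hence the "reg_c - REG_C_0" argument below.
         */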
12193         if (!ct->dr_action_orig) {
12194 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12195                 ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
12196                         (priv->sh->rx_domain, pool->devx_obj->obj,
12197                          ct->offset,
12198                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
12199                          reg_c - REG_C_0);
12200 #else
12201                 RTE_SET_USED(reg_c);
12202 #endif
12203                 if (!ct->dr_action_orig) {
12204                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12205                         rte_flow_error_set(error, rte_errno,
12206                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12207                                            "failed to create ASO CT action");
12208                         return 0;
12209                 }
12210         }
12211         if (!ct->dr_action_rply) {
12212 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
12213                 ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
12214                         (priv->sh->rx_domain, pool->devx_obj->obj,
12215                          ct->offset,
12216                          MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
12217                          reg_c - REG_C_0);
12218 #endif
12219                 if (!ct->dr_action_rply) {
12220                         flow_dv_aso_ct_dev_release(dev, ct_idx);
12221                         rte_flow_error_set(error, rte_errno,
12222                                            RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12223                                            "failed to create ASO CT action");
12224                         return 0;
12225                 }
12226         }
12227         return ct_idx;
12228 }
12229
12230 /*
12231  * Create a conntrack object with context and actions by using the ASO mechanism.
12232  *
12233  * @param[in] dev
12234  *   Pointer to rte_eth_dev structure.
12235  * @param[in] pro
12236  *   Pointer to conntrack information profile.
12237  * @param[out] error
12238  *   Pointer to the error structure.
12239  *
12240  * @return
12241  *   Index to conntrack object on success, 0 otherwise.
12242  */
12243 static uint32_t
12244 flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
12245                                    const struct rte_flow_action_conntrack *pro,
12246                                    struct rte_flow_error *error)
12247 {
12248         struct mlx5_priv *priv = dev->data->dev_private;
12249         struct mlx5_dev_ctx_shared *sh = priv->sh;
12250         struct mlx5_aso_ct_action *ct;
12251         uint32_t idx;
12252
12253         if (!sh->ct_aso_en)
12254                 return rte_flow_error_set(error, ENOTSUP,
12255                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12256                                           "Connection tracking is not supported");
12257         idx = flow_dv_aso_ct_alloc(dev, error);
12258         if (!idx)
12259                 return rte_flow_error_set(error, rte_errno,
12260                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12261                                           "Failed to allocate CT object");
12262         ct = flow_aso_ct_get_by_dev_idx(dev, idx);
12263         if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
12264                 return rte_flow_error_set(error, EBUSY,
12265                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12266                                           "Failed to update CT");
12267         ct->is_original = !!pro->is_original_dir;
12268         ct->peer = pro->peer_port;
12269         return idx;
12270 }
12271
12272 /**
12273  * Fill the flow with DV spec, lock free
12274  * (the mutex should be acquired by the caller).
12275  *
12276  * @param[in] dev
12277  *   Pointer to rte_eth_dev structure.
12278  * @param[in, out] dev_flow
12279  *   Pointer to the sub flow.
12280  * @param[in] attr
12281  *   Pointer to the flow attributes.
12282  * @param[in] items
12283  *   Pointer to the list of items.
12284  * @param[in] actions
12285  *   Pointer to the list of actions.
12286  * @param[out] error
12287  *   Pointer to the error structure.
12288  *
12289  * @return
12290  *   0 on success, a negative errno value otherwise and rte_errno is set.
12291  */
12292 static int
12293 flow_dv_translate(struct rte_eth_dev *dev,
12294                   struct mlx5_flow *dev_flow,
12295                   const struct rte_flow_attr *attr,
12296                   const struct rte_flow_item items[],
12297                   const struct rte_flow_action actions[],
12298                   struct rte_flow_error *error)
12299 {
12300         struct mlx5_priv *priv = dev->data->dev_private;
12301         struct mlx5_dev_config *dev_conf = &priv->config;
12302         struct rte_flow *flow = dev_flow->flow;
12303         struct mlx5_flow_handle *handle = dev_flow->handle;
12304         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
12305         struct mlx5_flow_rss_desc *rss_desc;
12306         uint64_t item_flags = 0;
12307         uint64_t last_item = 0;
12308         uint64_t action_flags = 0;
12309         struct mlx5_flow_dv_matcher matcher = {
12310                 .mask = {
12311                         .size = sizeof(matcher.mask.buf),
12312                 },
12313         };
12314         int actions_n = 0;
12315         bool actions_end = false;
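        /* Reserve stack room for the modify header resource together with
         * up to MLX5_MAX_MODIFY_NUM + 1 modification commands.
         */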
12316         union {
12317                 struct mlx5_flow_dv_modify_hdr_resource res;
12318                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
12319                             sizeof(struct mlx5_modification_cmd) *
12320                             (MLX5_MAX_MODIFY_NUM + 1)];
12321         } mhdr_dummy;
12322         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
12323         const struct rte_flow_action_count *count = NULL;
12324         const struct rte_flow_action_age *non_shared_age = NULL;
12325         union flow_dv_attr flow_attr = { .attr = 0 };
12326         uint32_t tag_be;
12327         union mlx5_flow_tbl_key tbl_key;
12328         uint32_t modify_action_position = UINT32_MAX;
12329         void *match_mask = matcher.mask.buf;
12330         void *match_value = dev_flow->dv.value.buf;
12331         uint8_t next_protocol = 0xff;
12332         struct rte_vlan_hdr vlan = { 0 };
12333         struct mlx5_flow_dv_dest_array_resource mdest_res;
12334         struct mlx5_flow_dv_sample_resource sample_res;
12335         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
12336         const struct rte_flow_action_sample *sample = NULL;
12337         struct mlx5_flow_sub_actions_list *sample_act;
12338         uint32_t sample_act_pos = UINT32_MAX;
12339         uint32_t age_act_pos = UINT32_MAX;
12340         uint32_t num_of_dest = 0;
12341         int tmp_actions_n = 0;
12342         uint32_t table;
12343         int ret = 0;
12344         const struct mlx5_flow_tunnel *tunnel = NULL;
12345         struct flow_grp_info grp_info = {
12346                 .external = !!dev_flow->external,
12347                 .transfer = !!attr->transfer,
12348                 .fdb_def_rule = !!priv->fdb_def_rule,
12349                 .skip_scale = dev_flow->skip_scale &
12350                         (1 << MLX5_SCALE_FLOW_GROUP_BIT),
12351                 .std_tbl_fix = true,
12352         };
12353         const struct rte_flow_item *head_item = items;
12354
12355         if (!wks)
12356                 return rte_flow_error_set(error, ENOMEM,
12357                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12358                                           NULL,
12359                                           "failed to push flow workspace");
12360         rss_desc = &wks->rss_desc;
12361         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
12362         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
12363         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
12364                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
12365         /* Update the normal path action resource into the last index of the array. */
12366         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
12367         if (is_tunnel_offload_active(dev)) {
12368                 if (dev_flow->tunnel) {
12369                         RTE_VERIFY(dev_flow->tof_type ==
12370                                    MLX5_TUNNEL_OFFLOAD_MISS_RULE);
12371                         tunnel = dev_flow->tunnel;
12372                 } else {
12373                         tunnel = mlx5_get_tof(items, actions,
12374                                               &dev_flow->tof_type);
12375                         dev_flow->tunnel = tunnel;
12376                 }
12377                 grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
12378                                         (dev, attr, tunnel, dev_flow->tof_type);
12379         }
12382         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
12383                                        &grp_info, error);
12384         if (ret)
12385                 return ret;
12386         dev_flow->dv.group = table;
12387         if (attr->transfer)
12388                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
12389         /* The number of actions must be reset to 0 in case of a dirty stack. */
12390         mhdr_res->actions_num = 0;
12391         if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
12392                 /*
12393                  * Do not add a decap action if the match rule drops the
12394                  * packet: HW rejects rules combining decap and drop.
12395                  *
12396                  * If the tunnel match rule was inserted before the matching
12397                  * tunnel set rule, the flow table used in the match rule must
12398                  * be registered. The current implementation handles that in
12399                  * flow_dv_match_register() at the end of the function.
12400                  */
12401                 bool add_decap = true;
12402                 const struct rte_flow_action *ptr = actions;
12403
12404                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
12405                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
12406                                 add_decap = false;
12407                                 break;
12408                         }
12409                 }
12410                 if (add_decap) {
12411                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12412                                                            attr->transfer,
12413                                                            error))
12414                                 return -rte_errno;
12415                         dev_flow->dv.actions[actions_n++] =
12416                                         dev_flow->dv.encap_decap->action;
12417                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12418                 }
12419         }
12420         for (; !actions_end ; actions++) {
12421                 const struct rte_flow_action_queue *queue;
12422                 const struct rte_flow_action_rss *rss;
12423                 const struct rte_flow_action *action = actions;
12424                 const uint8_t *rss_key;
12425                 struct mlx5_flow_tbl_resource *tbl;
12426                 struct mlx5_aso_age_action *age_act;
12427                 struct mlx5_flow_counter *cnt_act;
12428                 uint32_t port_id = 0;
12429                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
12430                 int action_type = actions->type;
12431                 const struct rte_flow_action *found_action = NULL;
12432                 uint32_t jump_group = 0;
12433                 uint32_t owner_idx;
12434                 struct mlx5_aso_ct_action *ct;
12435
12436                 if (!mlx5_flow_os_action_supported(action_type))
12437                         return rte_flow_error_set(error, ENOTSUP,
12438                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12439                                                   actions,
12440                                                   "action not supported");
12441                 switch (action_type) {
12442                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
12443                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
12444                         break;
12445                 case RTE_FLOW_ACTION_TYPE_VOID:
12446                         break;
12447                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
12448                         if (flow_dv_translate_action_port_id(dev, action,
12449                                                              &port_id, error))
12450                                 return -rte_errno;
12451                         port_id_resource.port_id = port_id;
12452                         MLX5_ASSERT(!handle->rix_port_id_action);
12453                         if (flow_dv_port_id_action_resource_register
12454                             (dev, &port_id_resource, dev_flow, error))
12455                                 return -rte_errno;
12456                         dev_flow->dv.actions[actions_n++] =
12457                                         dev_flow->dv.port_id_action->action;
12458                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12459                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
12460                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
12461                         num_of_dest++;
12462                         break;
12463                 case RTE_FLOW_ACTION_TYPE_FLAG:
12464                         action_flags |= MLX5_FLOW_ACTION_FLAG;
12465                         dev_flow->handle->mark = 1;
12466                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12467                                 struct rte_flow_action_mark mark = {
12468                                         .id = MLX5_FLOW_MARK_DEFAULT,
12469                                 };
12470
12471                                 if (flow_dv_convert_action_mark(dev, &mark,
12472                                                                 mhdr_res,
12473                                                                 error))
12474                                         return -rte_errno;
12475                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12476                                 break;
12477                         }
12478                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
12479                         /*
12480                          * Only one FLAG or MARK is supported per device flow
12481                          * right now. So the pointer to the tag resource must be
12482                          * zero before the register process.
12483                          */
12484                         MLX5_ASSERT(!handle->dvh.rix_tag);
12485                         if (flow_dv_tag_resource_register(dev, tag_be,
12486                                                           dev_flow, error))
12487                                 return -rte_errno;
12488                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12489                         dev_flow->dv.actions[actions_n++] =
12490                                         dev_flow->dv.tag_resource->action;
12491                         break;
12492                 case RTE_FLOW_ACTION_TYPE_MARK:
12493                         action_flags |= MLX5_FLOW_ACTION_MARK;
12494                         dev_flow->handle->mark = 1;
12495                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
12496                                 const struct rte_flow_action_mark *mark =
12497                                         (const struct rte_flow_action_mark *)
12498                                                 actions->conf;
12499
12500                                 if (flow_dv_convert_action_mark(dev, mark,
12501                                                                 mhdr_res,
12502                                                                 error))
12503                                         return -rte_errno;
12504                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
12505                                 break;
12506                         }
12507                         /* Fall-through */
12508                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
12509                         /* Legacy (non-extended) MARK action. */
12510                         tag_be = mlx5_flow_mark_set
12511                               (((const struct rte_flow_action_mark *)
12512                                (actions->conf))->id);
12513                         MLX5_ASSERT(!handle->dvh.rix_tag);
12514                         if (flow_dv_tag_resource_register(dev, tag_be,
12515                                                           dev_flow, error))
12516                                 return -rte_errno;
12517                         MLX5_ASSERT(dev_flow->dv.tag_resource);
12518                         dev_flow->dv.actions[actions_n++] =
12519                                         dev_flow->dv.tag_resource->action;
12520                         break;
12521                 case RTE_FLOW_ACTION_TYPE_SET_META:
12522                         if (flow_dv_convert_action_set_meta
12523                                 (dev, mhdr_res, attr,
12524                                  (const struct rte_flow_action_set_meta *)
12525                                   actions->conf, error))
12526                                 return -rte_errno;
12527                         action_flags |= MLX5_FLOW_ACTION_SET_META;
12528                         break;
12529                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
12530                         if (flow_dv_convert_action_set_tag
12531                                 (dev, mhdr_res,
12532                                  (const struct rte_flow_action_set_tag *)
12533                                   actions->conf, error))
12534                                 return -rte_errno;
12535                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12536                         break;
12537                 case RTE_FLOW_ACTION_TYPE_DROP:
12538                         action_flags |= MLX5_FLOW_ACTION_DROP;
12539                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
12540                         break;
12541                 case RTE_FLOW_ACTION_TYPE_QUEUE:
12542                         queue = actions->conf;
12543                         rss_desc->queue_num = 1;
12544                         rss_desc->queue[0] = queue->index;
12545                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
12546                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
12547                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
12548                         num_of_dest++;
12549                         break;
12550                 case RTE_FLOW_ACTION_TYPE_RSS:
12551                         rss = actions->conf;
12552                         memcpy(rss_desc->queue, rss->queue,
12553                                rss->queue_num * sizeof(uint16_t));
12554                         rss_desc->queue_num = rss->queue_num;
12555                         /* A NULL RSS key indicates the default RSS key. */
12556                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
12557                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
12558                         /*
12559                          * rss->level and rss->types should be set in advance
12560                          * when expanding items for RSS.
12561                          */
12562                         action_flags |= MLX5_FLOW_ACTION_RSS;
12563                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
12564                                 MLX5_FLOW_FATE_SHARED_RSS :
12565                                 MLX5_FLOW_FATE_QUEUE;
12566                         break;
12567                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
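                        /* The shared ASO age action index is carried in the
                         * conf pointer itself.
                         */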
12568                         flow->age = (uint32_t)(uintptr_t)(action->conf);
12569                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
12570                         __atomic_fetch_add(&age_act->refcnt, 1,
12571                                            __ATOMIC_RELAXED);
12572                         age_act_pos = actions_n++;
12573                         action_flags |= MLX5_FLOW_ACTION_AGE;
12574                         break;
12575                 case RTE_FLOW_ACTION_TYPE_AGE:
12576                         non_shared_age = action->conf;
12577                         age_act_pos = actions_n++;
12578                         action_flags |= MLX5_FLOW_ACTION_AGE;
12579                         break;
12580                 case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
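                        /* The shared counter index is carried in the conf
                         * pointer itself.
                         */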
12581                         flow->counter = (uint32_t)(uintptr_t)(action->conf);
12582                         cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
12583                                                              NULL);
12584                         __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
12585                                            __ATOMIC_RELAXED);
12586                         /* Save the information first, apply it later. */
12587                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12588                         break;
12589                 case RTE_FLOW_ACTION_TYPE_COUNT:
12590                         if (!dev_conf->devx) {
12591                                 return rte_flow_error_set
12592                                               (error, ENOTSUP,
12593                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12594                                                NULL,
12595                                                "count action not supported");
12596                         }
12597                         /* Save the information first, apply it later. */
12598                         count = action->conf;
12599                         action_flags |= MLX5_FLOW_ACTION_COUNT;
12600                         break;
12601                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
12602                         dev_flow->dv.actions[actions_n++] =
12603                                                 priv->sh->pop_vlan_action;
12604                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
12605                         break;
12606                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
12607                         if (!(action_flags &
12608                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
12609                                 flow_dev_get_vlan_info_from_items(items, &vlan);
12610                         vlan.eth_proto = rte_be_to_cpu_16
12611                              ((((const struct rte_flow_action_of_push_vlan *)
12612                                                    actions->conf)->ethertype));
12613                         found_action = mlx5_flow_find_action
12614                                         (actions + 1,
12615                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
12616                         if (found_action)
12617                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12618                         found_action = mlx5_flow_find_action
12619                                         (actions + 1,
12620                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
12621                         if (found_action)
12622                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
12623                         if (flow_dv_create_action_push_vlan
12624                                             (dev, attr, &vlan, dev_flow, error))
12625                                 return -rte_errno;
12626                         dev_flow->dv.actions[actions_n++] =
12627                                         dev_flow->dv.push_vlan_res->action;
12628                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
12629                         break;
12630                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
12631                         /* The OF_PUSH_VLAN action already handled this action. */
12632                         MLX5_ASSERT(action_flags &
12633                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
12634                         break;
12635                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
12636                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
12637                                 break;
12638                         flow_dev_get_vlan_info_from_items(items, &vlan);
12639                         mlx5_update_vlan_vid_pcp(actions, &vlan);
12640                         /* Without a VLAN push, this is a modify header action. */
12641                         if (flow_dv_convert_action_modify_vlan_vid
12642                                                 (mhdr_res, actions, error))
12643                                 return -rte_errno;
12644                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
12645                         break;
12646                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12647                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12648                         if (flow_dv_create_action_l2_encap(dev, actions,
12649                                                            dev_flow,
12650                                                            attr->transfer,
12651                                                            error))
12652                                 return -rte_errno;
12653                         dev_flow->dv.actions[actions_n++] =
12654                                         dev_flow->dv.encap_decap->action;
12655                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12656                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12657                                 sample_act->action_flags |=
12658                                                         MLX5_FLOW_ACTION_ENCAP;
12659                         break;
12660                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
12661                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
12662                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
12663                                                            attr->transfer,
12664                                                            error))
12665                                 return -rte_errno;
12666                         dev_flow->dv.actions[actions_n++] =
12667                                         dev_flow->dv.encap_decap->action;
12668                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12669                         break;
12670                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12671                         /* Handle encap with preceding decap. */
12672                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
12673                                 if (flow_dv_create_action_raw_encap
12674                                         (dev, actions, dev_flow, attr, error))
12675                                         return -rte_errno;
12676                                 dev_flow->dv.actions[actions_n++] =
12677                                         dev_flow->dv.encap_decap->action;
12678                         } else {
12679                                 /* Handle encap without preceding decap. */
12680                                 if (flow_dv_create_action_l2_encap
12681                                     (dev, actions, dev_flow, attr->transfer,
12682                                      error))
12683                                         return -rte_errno;
12684                                 dev_flow->dv.actions[actions_n++] =
12685                                         dev_flow->dv.encap_decap->action;
12686                         }
12687                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
12688                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
12689                                 sample_act->action_flags |=
12690                                                         MLX5_FLOW_ACTION_ENCAP;
12691                         break;
12692                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
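                        /* Skip VOID actions to check whether the decap is
                         * directly followed by a raw encap.
                         */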
12693                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
12694                                 ;
12695                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12696                                 if (flow_dv_create_action_l2_decap
12697                                     (dev, dev_flow, attr->transfer, error))
12698                                         return -rte_errno;
12699                                 dev_flow->dv.actions[actions_n++] =
12700                                         dev_flow->dv.encap_decap->action;
12701                         }
12702                         /* If decap is followed by encap, handle it at encap. */
12703                         action_flags |= MLX5_FLOW_ACTION_DECAP;
12704                         break;
12705                 case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
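                        /* For this internal action type the conf pointer
                         * already holds the prepared jump action.
                         */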
12706                         dev_flow->dv.actions[actions_n++] =
12707                                 (void *)(uintptr_t)action->conf;
12708                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12709                         break;
12710                 case RTE_FLOW_ACTION_TYPE_JUMP:
12711                         jump_group = ((const struct rte_flow_action_jump *)
12712                                                         action->conf)->group;
12713                         grp_info.std_tbl_fix = 0;
12714                         if (dev_flow->skip_scale &
12715                                 (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
12716                                 grp_info.skip_scale = 1;
12717                         else
12718                                 grp_info.skip_scale = 0;
12719                         ret = mlx5_flow_group_to_table(dev, tunnel,
12720                                                        jump_group,
12721                                                        &table,
12722                                                        &grp_info, error);
12723                         if (ret)
12724                                 return ret;
12725                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
12726                                                        attr->transfer,
12727                                                        !!dev_flow->external,
12728                                                        tunnel, jump_group, 0,
12729                                                        0, error);
12730                         if (!tbl)
12731                                 return rte_flow_error_set
12732                                                 (error, errno,
12733                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12734                                                  NULL,
12735                                                  "cannot create jump action.");
12736                         if (flow_dv_jump_tbl_resource_register
12737                             (dev, tbl, dev_flow, error)) {
12738                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12739                                 return rte_flow_error_set
12740                                                 (error, errno,
12741                                                  RTE_FLOW_ERROR_TYPE_ACTION,
12742                                                  NULL,
12743                                                  "cannot create jump action.");
12744                         }
12745                         dev_flow->dv.actions[actions_n++] =
12746                                         dev_flow->dv.jump->action;
12747                         action_flags |= MLX5_FLOW_ACTION_JUMP;
12748                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
12749                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
12750                         num_of_dest++;
12751                         break;
12752                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
12753                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
12754                         if (flow_dv_convert_action_modify_mac
12755                                         (mhdr_res, actions, error))
12756                                 return -rte_errno;
12757                         action_flags |= actions->type ==
12758                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
12759                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
12760                                         MLX5_FLOW_ACTION_SET_MAC_DST;
12761                         break;
12762                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
12763                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
12764                         if (flow_dv_convert_action_modify_ipv4
12765                                         (mhdr_res, actions, error))
12766                                 return -rte_errno;
12767                         action_flags |= actions->type ==
12768                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
12769                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
12770                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
12771                         break;
12772                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
12773                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
12774                         if (flow_dv_convert_action_modify_ipv6
12775                                         (mhdr_res, actions, error))
12776                                 return -rte_errno;
12777                         action_flags |= actions->type ==
12778                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
12779                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
12780                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
12781                         break;
12782                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
12783                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
12784                         if (flow_dv_convert_action_modify_tp
12785                                         (mhdr_res, actions, items,
12786                                          &flow_attr, dev_flow, !!(action_flags &
12787                                          MLX5_FLOW_ACTION_DECAP), error))
12788                                 return -rte_errno;
12789                         action_flags |= actions->type ==
12790                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
12791                                         MLX5_FLOW_ACTION_SET_TP_SRC :
12792                                         MLX5_FLOW_ACTION_SET_TP_DST;
12793                         break;
12794                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
12795                         if (flow_dv_convert_action_modify_dec_ttl
12796                                         (mhdr_res, items, &flow_attr, dev_flow,
12797                                          !!(action_flags &
12798                                          MLX5_FLOW_ACTION_DECAP), error))
12799                                 return -rte_errno;
12800                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
12801                         break;
12802                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
12803                         if (flow_dv_convert_action_modify_ttl
12804                                         (mhdr_res, actions, items, &flow_attr,
12805                                          dev_flow, !!(action_flags &
12806                                          MLX5_FLOW_ACTION_DECAP), error))
12807                                 return -rte_errno;
12808                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
12809                         break;
12810                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
12811                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
12812                         if (flow_dv_convert_action_modify_tcp_seq
12813                                         (mhdr_res, actions, error))
12814                                 return -rte_errno;
12815                         action_flags |= actions->type ==
12816                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
12817                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
12818                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
12819                         break;
12820
12821                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
12822                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
12823                         if (flow_dv_convert_action_modify_tcp_ack
12824                                         (mhdr_res, actions, error))
12825                                 return -rte_errno;
12826                         action_flags |= actions->type ==
12827                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
12828                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
12829                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
12830                         break;
12831                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
12832                         if (flow_dv_convert_action_set_reg
12833                                         (mhdr_res, actions, error))
12834                                 return -rte_errno;
12835                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12836                         break;
12837                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
12838                         if (flow_dv_convert_action_copy_mreg
12839                                         (dev, mhdr_res, actions, error))
12840                                 return -rte_errno;
12841                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
12842                         break;
12843                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
12844                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
12845                         dev_flow->handle->fate_action =
12846                                         MLX5_FLOW_FATE_DEFAULT_MISS;
12847                         break;
12848                 case RTE_FLOW_ACTION_TYPE_METER:
12849                         if (!wks->fm)
12850                                 return rte_flow_error_set(error, rte_errno,
12851                                         RTE_FLOW_ERROR_TYPE_ACTION,
12852                                         NULL, "Failed to get meter in flow.");
12853                         /* Set the meter action. */
12854                         dev_flow->dv.actions[actions_n++] =
12855                                 wks->fm->meter_action;
12856                         action_flags |= MLX5_FLOW_ACTION_METER;
12857                         break;
12858                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
12859                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
12860                                                               actions, error))
12861                                 return -rte_errno;
12862                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
12863                         break;
12864                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
12865                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
12866                                                               actions, error))
12867                                 return -rte_errno;
12868                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
12869                         break;
12870                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
12871                         sample_act_pos = actions_n;
12872                         sample = (const struct rte_flow_action_sample *)
12873                                  action->conf;
12874                         actions_n++;
12875                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
12876                         /* Put the encap action into the sample group when working with port id. */
12877                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
12878                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
12879                                 sample_act->action_flags |=
12880                                                         MLX5_FLOW_ACTION_ENCAP;
12881                         break;
12882                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
12883                         if (flow_dv_convert_action_modify_field
12884                                         (dev, mhdr_res, actions, attr, error))
12885                                 return -rte_errno;
12886                         action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
12887                         break;
12888                 case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12889                         owner_idx = (uint32_t)(uintptr_t)action->conf;
12890                         ct = flow_aso_ct_get_by_idx(dev, owner_idx);
12891                         if (!ct)
12892                                 return rte_flow_error_set(error, EINVAL,
12893                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12894                                                 NULL,
12895                                                 "Failed to get CT object.");
12896                         if (mlx5_aso_ct_available(priv->sh, ct))
12897                                 return rte_flow_error_set(error, rte_errno,
12898                                                 RTE_FLOW_ERROR_TYPE_ACTION,
12899                                                 NULL,
12900                                                 "CT is unavailable.");
12901                         if (ct->is_original)
12902                                 dev_flow->dv.actions[actions_n] =
12903                                                         ct->dr_action_orig;
12904                         else
12905                                 dev_flow->dv.actions[actions_n] =
12906                                                         ct->dr_action_rply;
12907                         flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
12908                         flow->ct = owner_idx;
12909                         __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
12910                         actions_n++;
12911                         action_flags |= MLX5_FLOW_ACTION_CT;
12912                         break;
12913                 case RTE_FLOW_ACTION_TYPE_END:
12914                         actions_end = true;
12915                         if (mhdr_res->actions_num) {
12916                                 /* Create the modify header action if needed. */
12917                                 if (flow_dv_modify_hdr_resource_register
12918                                         (dev, mhdr_res, dev_flow, error))
12919                                         return -rte_errno;
12920                                 dev_flow->dv.actions[modify_action_position] =
12921                                         handle->dvh.modify_hdr->action;
12922                         }
12923                         /*
12924                          * Handle the AGE and COUNT actions by a single HW counter
12925                          * when they are not shared.
12926                          */
12927                         if (action_flags & MLX5_FLOW_ACTION_AGE) {
12928                                 if ((non_shared_age &&
12929                                      count && !count->shared) ||
12930                                     !(priv->sh->flow_hit_aso_en &&
12931                                       (attr->group || attr->transfer))) {
12932                                         /* Create the age action by counters. */
12933                                         cnt_act = flow_dv_prepare_counter
12934                                                                 (dev, dev_flow,
12935                                                                  flow, count,
12936                                                                  non_shared_age,
12937                                                                  error);
12938                                         if (!cnt_act)
12939                                                 return -rte_errno;
12940                                         dev_flow->dv.actions[age_act_pos] =
12941                                                                 cnt_act->action;
12942                                         break;
12943                                 }
12944                                 if (!flow->age && non_shared_age) {
12945                                         flow->age = flow_dv_aso_age_alloc
12946                                                                 (dev, error);
12947                                         if (!flow->age)
12948                                                 return -rte_errno;
12949                                         flow_dv_aso_age_params_init
12950                                                     (dev, flow->age,
12951                                                      non_shared_age->context ?
12952                                                      non_shared_age->context :
12953                                                      (void *)(uintptr_t)
12954                                                      (dev_flow->flow_idx),
12955                                                      non_shared_age->timeout);
12956                                 }
12957                                 age_act = flow_aso_age_get_by_idx(dev,
12958                                                                   flow->age);
12959                                 dev_flow->dv.actions[age_act_pos] =
12960                                                              age_act->dr_action;
12961                         }
12962                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
12963                                 /*
12964                                  * Create one count action, to be used
12965                                  * by all sub-flows.
12966                                  */
12967                                 cnt_act = flow_dv_prepare_counter(dev, dev_flow,
12968                                                                   flow, count,
12969                                                                   NULL, error);
12970                                 if (!cnt_act)
12971                                         return -rte_errno;
12972                                 dev_flow->dv.actions[actions_n++] =
12973                                                                 cnt_act->action;
12974                         }
12975                 default:
12976                         break;
12977                 }
12978                 if (mhdr_res->actions_num &&
12979                     modify_action_position == UINT32_MAX)
12980                         modify_action_position = actions_n++;
12981         }
12982         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
12983                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
12984                 int item_type = items->type;
12985
12986                 if (!mlx5_flow_os_item_supported(item_type))
12987                         return rte_flow_error_set(error, ENOTSUP,
12988                                                   RTE_FLOW_ERROR_TYPE_ITEM,
12989                                                   NULL, "item not supported");
12990                 switch (item_type) {
12991                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
12992                         flow_dv_translate_item_port_id
12993                                 (dev, match_mask, match_value, items, attr);
12994                         last_item = MLX5_FLOW_ITEM_PORT_ID;
12995                         break;
12996                 case RTE_FLOW_ITEM_TYPE_ETH:
12997                         flow_dv_translate_item_eth(match_mask, match_value,
12998                                                    items, tunnel,
12999                                                    dev_flow->dv.group);
13000                         matcher.priority = action_flags &
13001                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
13002                                         !dev_flow->external ?
13003                                         MLX5_PRIORITY_MAP_L3 :
13004                                         MLX5_PRIORITY_MAP_L2;
13005                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
13006                                              MLX5_FLOW_LAYER_OUTER_L2;
13007                         break;
13008                 case RTE_FLOW_ITEM_TYPE_VLAN:
13009                         flow_dv_translate_item_vlan(dev_flow,
13010                                                     match_mask, match_value,
13011                                                     items, tunnel,
13012                                                     dev_flow->dv.group);
13013                         matcher.priority = MLX5_PRIORITY_MAP_L2;
13014                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
13015                                               MLX5_FLOW_LAYER_INNER_VLAN) :
13016                                              (MLX5_FLOW_LAYER_OUTER_L2 |
13017                                               MLX5_FLOW_LAYER_OUTER_VLAN);
13018                         break;
13019                 case RTE_FLOW_ITEM_TYPE_IPV4:
13020                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13021                                                   &item_flags, &tunnel);
13022                         flow_dv_translate_item_ipv4(match_mask, match_value,
13023                                                     items, tunnel,
13024                                                     dev_flow->dv.group);
13025                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13026                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
13027                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
13028                         if (items->mask != NULL &&
13029                             ((const struct rte_flow_item_ipv4 *)
13030                              items->mask)->hdr.next_proto_id) {
13031                                 next_protocol =
13032                                         ((const struct rte_flow_item_ipv4 *)
13033                                          (items->spec))->hdr.next_proto_id;
13034                                 next_protocol &=
13035                                         ((const struct rte_flow_item_ipv4 *)
13036                                          (items->mask))->hdr.next_proto_id;
13037                         } else {
13038                                 /* Reset for inner layer. */
13039                                 next_protocol = 0xff;
13040                         }
13041                         break;
13042                 case RTE_FLOW_ITEM_TYPE_IPV6:
13043                         mlx5_flow_tunnel_ip_check(items, next_protocol,
13044                                                   &item_flags, &tunnel);
13045                         flow_dv_translate_item_ipv6(match_mask, match_value,
13046                                                     items, tunnel,
13047                                                     dev_flow->dv.group);
13048                         matcher.priority = MLX5_PRIORITY_MAP_L3;
13049                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
13050                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
13051                         if (items->mask != NULL &&
13052                             ((const struct rte_flow_item_ipv6 *)
13053                              items->mask)->hdr.proto) {
13054                                 next_protocol =
13055                                         ((const struct rte_flow_item_ipv6 *)
13056                                          items->spec)->hdr.proto;
13057                                 next_protocol &=
13058                                         ((const struct rte_flow_item_ipv6 *)
13059                                          items->mask)->hdr.proto;
13060                         } else {
13061                                 /* Reset for inner layer. */
13062                                 next_protocol = 0xff;
13063                         }
13064                         break;
13065                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
13066                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
13067                                                              match_value,
13068                                                              items, tunnel);
13069                         last_item = tunnel ?
13070                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
13071                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
13072                         if (items->mask != NULL &&
13073                             ((const struct rte_flow_item_ipv6_frag_ext *)
13074                              items->mask)->hdr.next_header) {
13075                                 next_protocol =
13076                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13077                                  items->spec)->hdr.next_header;
13078                                 next_protocol &=
13079                                 ((const struct rte_flow_item_ipv6_frag_ext *)
13080                                  items->mask)->hdr.next_header;
13081                         } else {
13082                                 /* Reset for inner layer. */
13083                                 next_protocol = 0xff;
13084                         }
13085                         break;
13086                 case RTE_FLOW_ITEM_TYPE_TCP:
13087                         flow_dv_translate_item_tcp(match_mask, match_value,
13088                                                    items, tunnel);
13089                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13090                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
13091                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
13092                         break;
13093                 case RTE_FLOW_ITEM_TYPE_UDP:
13094                         flow_dv_translate_item_udp(match_mask, match_value,
13095                                                    items, tunnel);
13096                         matcher.priority = MLX5_PRIORITY_MAP_L4;
13097                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
13098                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
13099                         break;
13100                 case RTE_FLOW_ITEM_TYPE_GRE:
13101                         flow_dv_translate_item_gre(match_mask, match_value,
13102                                                    items, tunnel);
13103                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13104                         last_item = MLX5_FLOW_LAYER_GRE;
13105                         break;
13106                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
13107                         flow_dv_translate_item_gre_key(match_mask,
13108                                                        match_value, items);
13109                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
13110                         break;
13111                 case RTE_FLOW_ITEM_TYPE_NVGRE:
13112                         flow_dv_translate_item_nvgre(match_mask, match_value,
13113                                                      items, tunnel);
13114                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13115                         last_item = MLX5_FLOW_LAYER_GRE;
13116                         break;
13117                 case RTE_FLOW_ITEM_TYPE_VXLAN:
13118                         flow_dv_translate_item_vxlan(dev, attr,
13119                                                      match_mask, match_value,
13120                                                      items, tunnel);
13121                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13122                         last_item = MLX5_FLOW_LAYER_VXLAN;
13123                         break;
13124                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
13125                         flow_dv_translate_item_vxlan_gpe(match_mask,
13126                                                          match_value, items,
13127                                                          tunnel);
13128                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13129                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
13130                         break;
13131                 case RTE_FLOW_ITEM_TYPE_GENEVE:
13132                         flow_dv_translate_item_geneve(match_mask, match_value,
13133                                                       items, tunnel);
13134                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13135                         last_item = MLX5_FLOW_LAYER_GENEVE;
13136                         break;
13137                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
13138                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
13139                                                           match_value,
13140                                                           items, error);
13141                         if (ret)
13142                                 return rte_flow_error_set(error, -ret,
13143                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13144                                         "cannot create GENEVE TLV option");
13145                         flow->geneve_tlv_option = 1;
13146                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
13147                         break;
13148                 case RTE_FLOW_ITEM_TYPE_MPLS:
13149                         flow_dv_translate_item_mpls(match_mask, match_value,
13150                                                     items, last_item, tunnel);
13151                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13152                         last_item = MLX5_FLOW_LAYER_MPLS;
13153                         break;
13154                 case RTE_FLOW_ITEM_TYPE_MARK:
13155                         flow_dv_translate_item_mark(dev, match_mask,
13156                                                     match_value, items);
13157                         last_item = MLX5_FLOW_ITEM_MARK;
13158                         break;
13159                 case RTE_FLOW_ITEM_TYPE_META:
13160                         flow_dv_translate_item_meta(dev, match_mask,
13161                                                     match_value, attr, items);
13162                         last_item = MLX5_FLOW_ITEM_METADATA;
13163                         break;
13164                 case RTE_FLOW_ITEM_TYPE_ICMP:
13165                         flow_dv_translate_item_icmp(match_mask, match_value,
13166                                                     items, tunnel);
13167                         last_item = MLX5_FLOW_LAYER_ICMP;
13168                         break;
13169                 case RTE_FLOW_ITEM_TYPE_ICMP6:
13170                         flow_dv_translate_item_icmp6(match_mask, match_value,
13171                                                       items, tunnel);
13172                         last_item = MLX5_FLOW_LAYER_ICMP6;
13173                         break;
13174                 case RTE_FLOW_ITEM_TYPE_TAG:
13175                         flow_dv_translate_item_tag(dev, match_mask,
13176                                                    match_value, items);
13177                         last_item = MLX5_FLOW_ITEM_TAG;
13178                         break;
13179                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
13180                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
13181                                                         match_value, items);
13182                         last_item = MLX5_FLOW_ITEM_TAG;
13183                         break;
13184                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
13185                         flow_dv_translate_item_tx_queue(dev, match_mask,
13186                                                         match_value,
13187                                                         items);
13188                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
13189                         break;
13190                 case RTE_FLOW_ITEM_TYPE_GTP:
13191                         flow_dv_translate_item_gtp(match_mask, match_value,
13192                                                    items, tunnel);
13193                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
13194                         last_item = MLX5_FLOW_LAYER_GTP;
13195                         break;
13196                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
13197                         ret = flow_dv_translate_item_gtp_psc(match_mask,
13198                                                           match_value,
13199                                                           items);
13200                         if (ret)
13201                                 return rte_flow_error_set(error, -ret,
13202                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13203                                         "cannot create GTP PSC item");
13204                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
13205                         break;
13206                 case RTE_FLOW_ITEM_TYPE_ECPRI:
13207                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
13208                                 /* Create it only the first time it is used. */
13209                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
13210                                 if (ret)
13211                                         return rte_flow_error_set
13212                                                 (error, -ret,
13213                                                 RTE_FLOW_ERROR_TYPE_ITEM,
13214                                                 NULL,
13215                                                 "cannot create eCPRI parser");
13216                         }
13217                         flow_dv_translate_item_ecpri(dev, match_mask,
13218                                                      match_value, items);
13219                         /* No other protocol should follow eCPRI layer. */
13220                         last_item = MLX5_FLOW_LAYER_ECPRI;
13221                         break;
13222                 case RTE_FLOW_ITEM_TYPE_INTEGRITY:
13223                         flow_dv_translate_item_integrity(match_mask,
13224                                                          match_value,
13225                                                          head_item, items);
13226                         break;
13227                 case RTE_FLOW_ITEM_TYPE_CONNTRACK:
13228                         flow_dv_translate_item_aso_ct(dev, match_mask,
13229                                                       match_value, items);
13230                         break;
13231                 default:
13232                         break;
13233                 }
13234                 item_flags |= last_item;
13235         }
13236         /*
13237          * When E-Switch mode is enabled, we have two cases where we need to
13238          * set the source port manually.
13239          * The first is the case of a NIC steering rule, and the second is an
13240          * E-Switch rule where no port_id item was found. In both cases
13241          * the source port is set according to the current port in use.
13242          */
13243         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
13244             (priv->representor || priv->master)) {
13245                 if (flow_dv_translate_item_port_id(dev, match_mask,
13246                                                    match_value, NULL, attr))
13247                         return -rte_errno;
13248         }
13249 #ifdef RTE_LIBRTE_MLX5_DEBUG
13250         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
13251                                               dev_flow->dv.value.buf));
13252 #endif
13253         /*
13254          * Layers may already be initialized from the prefix flow if this
13255          * dev_flow is the suffix flow.
13256          */
13257         handle->layers |= item_flags;
13258         if (action_flags & MLX5_FLOW_ACTION_RSS)
13259                 flow_dv_hashfields_set(dev_flow, rss_desc);
13260         /* If the sample action contains an RSS action, the Sample/Mirror
13261          * resource should be registered after the hash fields are updated.
13262          */
13263         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
13264                 ret = flow_dv_translate_action_sample(dev,
13265                                                       sample,
13266                                                       dev_flow, attr,
13267                                                       &num_of_dest,
13268                                                       sample_actions,
13269                                                       &sample_res,
13270                                                       error);
13271                 if (ret < 0)
13272                         return ret;
13273                 ret = flow_dv_create_action_sample(dev,
13274                                                    dev_flow,
13275                                                    num_of_dest,
13276                                                    &sample_res,
13277                                                    &mdest_res,
13278                                                    sample_actions,
13279                                                    action_flags,
13280                                                    error);
13281                 if (ret < 0)
13282                         return rte_flow_error_set
13283                                                 (error, rte_errno,
13284                                                 RTE_FLOW_ERROR_TYPE_ACTION,
13285                                                 NULL,
13286                                                 "cannot create sample action");
13287                 if (num_of_dest > 1) {
13288                         dev_flow->dv.actions[sample_act_pos] =
13289                         dev_flow->dv.dest_array_res->action;
13290                 } else {
13291                         dev_flow->dv.actions[sample_act_pos] =
13292                         dev_flow->dv.sample_res->verbs_action;
13293                 }
13294         }
13295         /*
13296          * For multiple destinations (sample action with ratio=1), the encap
13297          * action and the port id action will be combined into a group action,
13298          * so the original actions must be removed from the flow and only the
13299          * sample action is used instead.
13300          */
13301         if (num_of_dest > 1 &&
13302             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
13303                 int i;
13304                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
13305
13306                 for (i = 0; i < actions_n; i++) {
13307                         if ((sample_act->dr_encap_action &&
13308                                 sample_act->dr_encap_action ==
13309                                 dev_flow->dv.actions[i]) ||
13310                                 (sample_act->dr_port_id_action &&
13311                                 sample_act->dr_port_id_action ==
13312                                 dev_flow->dv.actions[i]) ||
13313                                 (sample_act->dr_jump_action &&
13314                                 sample_act->dr_jump_action ==
13315                                 dev_flow->dv.actions[i]))
13316                                 continue;
13317                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
13318                 }
13319                 memcpy((void *)dev_flow->dv.actions,
13320                                 (void *)temp_actions,
13321                                 tmp_actions_n * sizeof(void *));
13322                 actions_n = tmp_actions_n;
13323         }
13324         dev_flow->dv.actions_n = actions_n;
13325         dev_flow->act_flags = action_flags;
13326         if (wks->skip_matcher_reg)
13327                 return 0;
13328         /* Register matcher. */
13329         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
13330                                     matcher.mask.size);
13331         matcher.priority = mlx5_get_matcher_priority(dev, attr,
13332                                         matcher.priority);
13333         /*
13334          * When creating a meter drop flow in the drop table using the
13335          * original 5-tuple match, the matcher priority should be lower
13336          * than the mtr_id matcher.
13337          */
13338         if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
13339             dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP &&
13340             matcher.priority <= MLX5_REG_BITS)
13341                 matcher.priority += MLX5_REG_BITS;
13342         /* The reserved field does not need to be set to 0 here. */
13343         tbl_key.is_fdb = attr->transfer;
13344         tbl_key.is_egress = attr->egress;
13345         tbl_key.level = dev_flow->dv.group;
13346         tbl_key.id = dev_flow->dv.table_id;
13347         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
13348                                      tunnel, attr->group, error))
13349                 return -rte_errno;
13350         return 0;
13351 }
13352
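/*
 * Editor's note: an illustrative, self-contained sketch (not part of the
 * driver) of the translation pattern used above. The item loop walks the
 * pattern until RTE_FLOW_ITEM_TYPE_END, translates each item into the
 * matcher, and accumulates a layer bitmap that later stages consult. All
 * example_* names and EXAMPLE_* flags below are hypothetical.
 */
#define EXAMPLE_LAYER_L2   (UINT64_C(1) << 0)
#define EXAMPLE_LAYER_IPV4 (UINT64_C(1) << 1)

static uint64_t
example_translate_items(const struct rte_flow_item *items)
{
	uint64_t item_flags = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		uint64_t last_item = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/* L2 matcher fields would be filled here. */
			last_item = EXAMPLE_LAYER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* L3 matcher fields would be filled here. */
			last_item = EXAMPLE_LAYER_IPV4;
			break;
		default:
			break;
		}
		item_flags |= last_item;
	}
	return item_flags;
}
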
13353 /**
13354  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13355  * The IBV_RX_HASH_INNER bit is masked out, so the inner and outer
13356  * variants of the same hash share a single slot.
13357  *
13358  * @param[in, out] action
13359  *   Shared RSS action holding hash RX queue objects.
13360  * @param[in] hash_fields
13361  *   Defines combination of packet fields to participate in RX hash.
13363  * @param[in] hrxq_idx
13364  *   Hash RX queue index to set.
13365  *
13366  * @return
13367  *   0 on success, -1 otherwise.
13368  */
13369 static int
13370 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
13371                               const uint64_t hash_fields,
13372                               uint32_t hrxq_idx)
13373 {
13374         uint32_t *hrxqs = action->hrxq;
13375
13376         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13377         case MLX5_RSS_HASH_IPV4:
13378                 /* fall-through. */
13379         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13380                 /* fall-through. */
13381         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13382                 hrxqs[0] = hrxq_idx;
13383                 return 0;
13384         case MLX5_RSS_HASH_IPV4_TCP:
13385                 /* fall-through. */
13386         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13387                 /* fall-through. */
13388         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13389                 hrxqs[1] = hrxq_idx;
13390                 return 0;
13391         case MLX5_RSS_HASH_IPV4_UDP:
13392                 /* fall-through. */
13393         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13394                 /* fall-through. */
13395         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13396                 hrxqs[2] = hrxq_idx;
13397                 return 0;
13398         case MLX5_RSS_HASH_IPV6:
13399                 /* fall-through. */
13400         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13401                 /* fall-through. */
13402         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13403                 hrxqs[3] = hrxq_idx;
13404                 return 0;
13405         case MLX5_RSS_HASH_IPV6_TCP:
13406                 /* fall-through. */
13407         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13408                 /* fall-through. */
13409         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13410                 hrxqs[4] = hrxq_idx;
13411                 return 0;
13412         case MLX5_RSS_HASH_IPV6_UDP:
13413                 /* fall-through. */
13414         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13415                 /* fall-through. */
13416         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13417                 hrxqs[5] = hrxq_idx;
13418                 return 0;
13419         case MLX5_RSS_HASH_NONE:
13420                 hrxqs[6] = hrxq_idx;
13421                 return 0;
13422         default:
13423                 return -1;
13424         }
13425 }
13426
13427 /**
13428  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields).
13429  * The IBV_RX_HASH_INNER bit is masked out here as well.
13430  *
13431  * @param[in] dev
13432  *   Pointer to the Ethernet device structure.
13433  * @param[in] idx
13434  *   Shared RSS action ID holding hash RX queue objects.
13435  * @param[in] hash_fields
13436  *   Defines combination of packet fields to participate in RX hash.
13439  *
13440  * @return
13441  *   Valid hash RX queue index, otherwise 0.
13442  */
13443 static uint32_t
13444 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
13445                                  const uint64_t hash_fields)
13446 {
13447         struct mlx5_priv *priv = dev->data->dev_private;
13448         struct mlx5_shared_action_rss *shared_rss =
13449             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
13450         const uint32_t *hrxqs = shared_rss->hrxq;
13451
13452         switch (hash_fields & ~IBV_RX_HASH_INNER) {
13453         case MLX5_RSS_HASH_IPV4:
13454                 /* fall-through. */
13455         case MLX5_RSS_HASH_IPV4_DST_ONLY:
13456                 /* fall-through. */
13457         case MLX5_RSS_HASH_IPV4_SRC_ONLY:
13458                 return hrxqs[0];
13459         case MLX5_RSS_HASH_IPV4_TCP:
13460                 /* fall-through. */
13461         case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
13462                 /* fall-through. */
13463         case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
13464                 return hrxqs[1];
13465         case MLX5_RSS_HASH_IPV4_UDP:
13466                 /* fall-through. */
13467         case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
13468                 /* fall-through. */
13469         case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
13470                 return hrxqs[2];
13471         case MLX5_RSS_HASH_IPV6:
13472                 /* fall-through. */
13473         case MLX5_RSS_HASH_IPV6_DST_ONLY:
13474                 /* fall-through. */
13475         case MLX5_RSS_HASH_IPV6_SRC_ONLY:
13476                 return hrxqs[3];
13477         case MLX5_RSS_HASH_IPV6_TCP:
13478                 /* fall-through. */
13479         case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
13480                 /* fall-through. */
13481         case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
13482                 return hrxqs[4];
13483         case MLX5_RSS_HASH_IPV6_UDP:
13484                 /* fall-through. */
13485         case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
13486                 /* fall-through. */
13487         case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
13488                 return hrxqs[5];
13489         case MLX5_RSS_HASH_NONE:
13490                 return hrxqs[6];
13491         default:
13492                 return 0;
13493         }
13495 }
13496
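/*
 * Editor's note: a hypothetical sketch (not driver code) of the slot
 * mapping shared by the two helpers above. Both switches collapse the
 * SRC_ONLY/DST_ONLY variants onto the slot of the full hash, yielding
 * seven slots in total; a common indexer like this keeps the set and
 * lookup paths in sync. All example_* names are illustrative.
 */
enum example_rss_slot {
	EXAMPLE_SLOT_IPV4 = 0,     /* IPv4 and its SRC/DST_ONLY variants. */
	EXAMPLE_SLOT_IPV4_TCP = 1,
	EXAMPLE_SLOT_IPV4_UDP = 2,
	EXAMPLE_SLOT_IPV6 = 3,
	EXAMPLE_SLOT_IPV6_TCP = 4,
	EXAMPLE_SLOT_IPV6_UDP = 5,
	EXAMPLE_SLOT_NONE = 6,
	EXAMPLE_SLOT_MAX = 7,
};

struct example_rss_action {
	uint32_t hrxq[EXAMPLE_SLOT_MAX];
};

static int
example_hrxq_set(struct example_rss_action *a, int slot, uint32_t idx)
{
	if (slot < 0 || slot >= EXAMPLE_SLOT_MAX)
		return -1;
	a->hrxq[slot] = idx;
	return 0;
}

static uint32_t
example_hrxq_lookup(const struct example_rss_action *a, int slot)
{
	/* 0 doubles as the "no queue" index, mirroring the lookup above. */
	return (slot >= 0 && slot < EXAMPLE_SLOT_MAX) ? a->hrxq[slot] : 0;
}
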
13497 /**
13498  * Apply the flow to the NIC, lock free
13499  * (the mutex should be acquired by the caller).
13500  *
13501  * @param[in] dev
13502  *   Pointer to the Ethernet device structure.
13503  * @param[in, out] flow
13504  *   Pointer to flow structure.
13505  * @param[out] error
13506  *   Pointer to error structure.
13507  *
13508  * @return
13509  *   0 on success, a negative errno value otherwise and rte_errno is set.
13510  */
13511 static int
13512 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
13513               struct rte_flow_error *error)
13514 {
13515         struct mlx5_flow_dv_workspace *dv;
13516         struct mlx5_flow_handle *dh;
13517         struct mlx5_flow_handle_dv *dv_h;
13518         struct mlx5_flow *dev_flow;
13519         struct mlx5_priv *priv = dev->data->dev_private;
13520         uint32_t handle_idx;
13521         int n;
13522         int err;
13523         int idx;
13524         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
13525         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
13526         uint8_t misc_mask;
13527
13528         MLX5_ASSERT(wks);
13529         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
13530                 dev_flow = &wks->flows[idx];
13531                 dv = &dev_flow->dv;
13532                 dh = dev_flow->handle;
13533                 dv_h = &dh->dvh;
13534                 n = dv->actions_n;
13535                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
13536                         if (dv->transfer) {
13537                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13538                                 dv->actions[n++] = priv->sh->dr_drop_action;
13539                         } else {
13540 #ifdef HAVE_MLX5DV_DR
13541                                 /* DR supports drop action placeholder. */
13542                                 MLX5_ASSERT(priv->sh->dr_drop_action);
13543                                 dv->actions[n++] = priv->sh->dr_drop_action;
13544 #else
13545                                 /* For DV we use the explicit drop queue. */
13546                                 MLX5_ASSERT(priv->drop_queue.hrxq);
13547                                 dv->actions[n++] =
13548                                                 priv->drop_queue.hrxq->action;
13549 #endif
13550                         }
13551                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
13552                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
13553                         struct mlx5_hrxq *hrxq;
13554                         uint32_t hrxq_idx;
13555
13556                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
13557                                                     &hrxq_idx);
13558                         if (!hrxq) {
13559                                 rte_flow_error_set
13560                                         (error, rte_errno,
13561                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13562                                          "cannot get hash queue");
13563                                 goto error;
13564                         }
13565                         dh->rix_hrxq = hrxq_idx;
13566                         dv->actions[n++] = hrxq->action;
13567                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13568                         struct mlx5_hrxq *hrxq = NULL;
13569                         uint32_t hrxq_idx;
13570
13571                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
13572                                                 rss_desc->shared_rss,
13573                                                 dev_flow->hash_fields);
13574                         if (hrxq_idx)
13575                                 hrxq = mlx5_ipool_get
13576                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
13577                                          hrxq_idx);
13578                         if (!hrxq) {
13579                                 rte_flow_error_set
13580                                         (error, rte_errno,
13581                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13582                                          "cannot get hash queue");
13583                                 goto error;
13584                         }
13585                         dh->rix_srss = rss_desc->shared_rss;
13586                         dv->actions[n++] = hrxq->action;
13587                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
13588                         if (!priv->sh->default_miss_action) {
13589                                 rte_flow_error_set
13590                                         (error, rte_errno,
13591                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13592                                          "default miss action was not created.");
13593                                 goto error;
13594                         }
13595                         dv->actions[n++] = priv->sh->default_miss_action;
13596                 }
13597                 misc_mask = flow_dv_matcher_enable(dv->value.buf);
13598                 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
13599                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
13600                                                (void *)&dv->value, n,
13601                                                dv->actions, &dh->drv_flow);
13602                 if (err) {
13603                         rte_flow_error_set
13604                                 (error, errno,
13605                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13606                                 NULL,
13607                                 (!priv->config.allow_duplicate_pattern &&
13608                                 errno == EEXIST) ?
13609                                 "duplicating pattern is not allowed" :
13610                                 "hardware refuses to create flow");
13611                         goto error;
13612                 }
13613                 if (priv->vmwa_context &&
13614                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
13615                         /*
13616                          * The rule contains the VLAN pattern.
13617                          * For VF we are going to create a VLAN
13618                          * interface to make the hypervisor set the
13619                          * correct e-Switch vport context.
13620                          */
13621                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
13622                 }
13623         }
13624         return 0;
13625 error:
13626         err = rte_errno; /* Save rte_errno before cleanup. */
13627         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
13628                        handle_idx, dh, next) {
13629                 /* hrxq is a union; don't clear it if the flag is not set. */
13630                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
13631                         mlx5_hrxq_release(dev, dh->rix_hrxq);
13632                         dh->rix_hrxq = 0;
13633                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
13634                         dh->rix_srss = 0;
13635                 }
13636                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
13637                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
13638         }
13639         rte_errno = err; /* Restore rte_errno. */
13640         return -rte_errno;
13641 }
13642
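/*
 * Editor's note: flow_dv_apply() above uses the classic "save errno,
 * unwind, restore errno" idiom so that cleanup calls cannot clobber the
 * original failure cause. A reduced, hypothetical shape of that idiom;
 * the example_* helpers are stand-ins, not driver functions, and
 * <errno.h> is assumed:
 */
static int example_acquire(void) { errno = ENOMEM; return -1; }
static void example_cleanup(void) { errno = 0; /* May clobber errno. */ }

static int
example_apply(void)
{
	if (example_acquire())
		goto error;
	return 0;
error:
	{
		int err = errno;   /* Save before cleanup. */

		example_cleanup(); /* Cleanup may touch errno itself. */
		errno = err;       /* Restore the original cause. */
		return -errno;
	}
}
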
13643 void
13644 flow_dv_matcher_remove_cb(struct mlx5_list *list __rte_unused,
13645                           struct mlx5_list_entry *entry)
13646 {
13647         struct mlx5_flow_dv_matcher *resource = container_of(entry,
13648                                                              typeof(*resource),
13649                                                              entry);
13650
13651         claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
13652         mlx5_free(resource);
13653 }
13654
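/*
 * Editor's note: the *_remove_cb callbacks in this file recover the
 * enclosing resource from its embedded list entry via container_of().
 * A minimal stand-alone illustration of that pointer arithmetic, with
 * illustrative example_* types (assumes <stddef.h> for offsetof):
 */
struct example_list_entry { struct example_list_entry *next; };

struct example_matcher_res {
	int payload;
	struct example_list_entry entry; /* Embedded linkage, not a pointer. */
};

static struct example_matcher_res *
example_res_from_entry(struct example_list_entry *e)
{
	/* Equivalent to container_of(e, struct example_matcher_res, entry). */
	return (struct example_matcher_res *)
	       ((char *)e - offsetof(struct example_matcher_res, entry));
}
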
13655 /**
13656  * Release the flow matcher.
13657  *
13658  * @param dev
13659  *   Pointer to Ethernet device.
13660  * @param handle
13661  *   Pointer to mlx5_flow_handle.
13662  *
13663  * @return
13664  *   1 while a reference on it exists, 0 when freed.
13665  */
13666 static int
13667 flow_dv_matcher_release(struct rte_eth_dev *dev,
13668                         struct mlx5_flow_handle *handle)
13669 {
13670         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
13671         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
13672                                                             typeof(*tbl), tbl);
13673         int ret;
13674
13675         MLX5_ASSERT(matcher->matcher_object);
13676         ret = mlx5_list_unregister(&tbl->matchers, &matcher->entry);
13677         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
13678         return ret;
13679 }
13680
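/*
 * Editor's note: the release helpers in this file share one contract:
 * return 1 while another reference still holds the resource, 0 once the
 * last reference is dropped and the object is freed. A hypothetical
 * reference-counted release following that contract:
 */
struct example_refcnt_res {
	uint32_t refcnt;
};

static int
example_refcnt_release(struct example_refcnt_res *res)
{
	/* Relaxed ordering suffices for a pure counter, as with the
	 * __atomic_fetch_add()/__atomic_sub_fetch() uses in this file. */
	if (__atomic_sub_fetch(&res->refcnt, 1, __ATOMIC_RELAXED) != 0)
		return 1; /* Still referenced elsewhere. */
	/* The object would be destroyed and freed here. */
	return 0; /* Freed. */
}
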
13681 /**
13682  * Release encap_decap resource.
13683  *
13684  * @param list
13685  *   Pointer to the hash list.
13686  * @param entry
13687  *   Pointer to the existing resource entry object.
13688  */
13689 void
13690 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
13691                               struct mlx5_hlist_entry *entry)
13692 {
13693         struct mlx5_dev_ctx_shared *sh = list->ctx;
13694         struct mlx5_flow_dv_encap_decap_resource *res =
13695                                        container_of(entry, typeof(*res), entry);
13696
13697         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13698         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
13699 }
13700
13701 /**
13702  * Release an encap/decap resource.
13703  *
13704  * @param dev
13705  *   Pointer to Ethernet device.
13706  * @param encap_decap_idx
13707  *   Index of encap decap resource.
13708  *
13709  * @return
13710  *   1 while a reference on it exists, 0 when freed.
13711  */
13712 static int
13713 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
13714                                      uint32_t encap_decap_idx)
13715 {
13716         struct mlx5_priv *priv = dev->data->dev_private;
13717         struct mlx5_flow_dv_encap_decap_resource *resource;
13718
13719         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
13720                                   encap_decap_idx);
13721         if (!resource)
13722                 return 0;
13723         MLX5_ASSERT(resource->action);
13724         return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
13725 }
13726
13727 /**
13728  * Release a jump to table action resource.
13729  *
13730  * @param dev
13731  *   Pointer to Ethernet device.
13732  * @param rix_jump
13733  *   Index to the jump action resource.
13734  *
13735  * @return
13736  *   1 while a reference on it exists, 0 when freed.
13737  */
13738 static int
13739 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
13740                                   uint32_t rix_jump)
13741 {
13742         struct mlx5_priv *priv = dev->data->dev_private;
13743         struct mlx5_flow_tbl_data_entry *tbl_data;
13744
13745         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
13746                                   rix_jump);
13747         if (!tbl_data)
13748                 return 0;
13749         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
13750 }
13751
13752 void
13753 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
13754                          struct mlx5_hlist_entry *entry)
13755 {
13756         struct mlx5_flow_dv_modify_hdr_resource *res =
13757                 container_of(entry, typeof(*res), entry);
13758
13759         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
13760         mlx5_free(entry);
13761 }
13762
13763 /**
13764  * Release a modify-header resource.
13765  *
13766  * @param dev
13767  *   Pointer to Ethernet device.
13768  * @param handle
13769  *   Pointer to mlx5_flow_handle.
13770  *
13771  * @return
13772  *   1 while a reference on it exists, 0 when freed.
13773  */
13774 static int
13775 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
13776                                     struct mlx5_flow_handle *handle)
13777 {
13778         struct mlx5_priv *priv = dev->data->dev_private;
13779         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
13780
13781         MLX5_ASSERT(entry->action);
13782         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
13783 }
13784
13785 void
13786 flow_dv_port_id_remove_cb(struct mlx5_list *list,
13787                           struct mlx5_list_entry *entry)
13788 {
13789         struct mlx5_dev_ctx_shared *sh = list->ctx;
13790         struct mlx5_flow_dv_port_id_action_resource *resource =
13791                                   container_of(entry, typeof(*resource), entry);
13792
13793         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13794         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
13795 }
13796
13797 /**
13798  * Release port ID action resource.
13799  *
13800  * @param dev
13801  *   Pointer to Ethernet device.
13802  * @param port_id
13803  *   Index to port ID action resource.
13804  *
13805  * @return
13806  *   1 while a reference on it exists, 0 when freed.
13807  */
13808 static int
13809 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
13810                                         uint32_t port_id)
13811 {
13812         struct mlx5_priv *priv = dev->data->dev_private;
13813         struct mlx5_flow_dv_port_id_action_resource *resource;
13814
13815         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
13816         if (!resource)
13817                 return 0;
13818         MLX5_ASSERT(resource->action);
13819         return mlx5_list_unregister(&priv->sh->port_id_action_list,
13820                                     &resource->entry);
13821 }
13822
13823 /**
13824  * Release shared RSS action resource.
13825  *
13826  * @param dev
13827  *   Pointer to Ethernet device.
13828  * @param srss
13829  *   Shared RSS action index.
13830  */
13831 static void
13832 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
13833 {
13834         struct mlx5_priv *priv = dev->data->dev_private;
13835         struct mlx5_shared_action_rss *shared_rss;
13836
13837         shared_rss = mlx5_ipool_get
13838                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
13839         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
13840 }
13841
13842 void
13843 flow_dv_push_vlan_remove_cb(struct mlx5_list *list,
13844                             struct mlx5_list_entry *entry)
13845 {
13846         struct mlx5_dev_ctx_shared *sh = list->ctx;
13847         struct mlx5_flow_dv_push_vlan_action_resource *resource =
13848                         container_of(entry, typeof(*resource), entry);
13849
13850         claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13851         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
13852 }
13853
13854 /**
13855  * Release push vlan action resource.
13856  *
13857  * @param dev
13858  *   Pointer to Ethernet device.
13859  * @param handle
13860  *   Pointer to mlx5_flow_handle.
13861  *
13862  * @return
13863  *   1 while a reference on it exists, 0 when freed.
13864  */
13865 static int
13866 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
13867                                           struct mlx5_flow_handle *handle)
13868 {
13869         struct mlx5_priv *priv = dev->data->dev_private;
13870         struct mlx5_flow_dv_push_vlan_action_resource *resource;
13871         uint32_t idx = handle->dvh.rix_push_vlan;
13872
13873         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
13874         if (!resource)
13875                 return 0;
13876         MLX5_ASSERT(resource->action);
13877         return mlx5_list_unregister(&priv->sh->push_vlan_action_list,
13878                                     &resource->entry);
13879 }
13880
13881 /**
13882  * Release the fate resource.
13883  *
13884  * @param dev
13885  *   Pointer to Ethernet device.
13886  * @param handle
13887  *   Pointer to mlx5_flow_handle.
13888  */
13889 static void
13890 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
13891                                struct mlx5_flow_handle *handle)
13892 {
13893         if (!handle->rix_fate)
13894                 return;
13895         switch (handle->fate_action) {
13896         case MLX5_FLOW_FATE_QUEUE:
13897                 if (!handle->dvh.rix_sample && !handle->dvh.rix_dest_array)
13898                         mlx5_hrxq_release(dev, handle->rix_hrxq);
13899                 break;
13900         case MLX5_FLOW_FATE_JUMP:
13901                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
13902                 break;
13903         case MLX5_FLOW_FATE_PORT_ID:
13904                 flow_dv_port_id_action_resource_release(dev,
13905                                 handle->rix_port_id_action);
13906                 break;
13907         default:
13908                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
13909                 break;
13910         }
13911         handle->rix_fate = 0;
13912 }
13913
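/*
 * Editor's note: the rix_* indexes in mlx5_flow_handle overlay each other
 * in a union discriminated by fate_action, which is why the release path
 * above must switch on the fate before touching any index. A reduced,
 * hypothetical shape of that pattern (example_* names are illustrative):
 */
enum example_fate { EXAMPLE_FATE_QUEUE, EXAMPLE_FATE_JUMP };

struct example_handle {
	enum example_fate fate;
	union {
		uint32_t rix_hrxq; /* Valid only for EXAMPLE_FATE_QUEUE. */
		uint32_t rix_jump; /* Valid only for EXAMPLE_FATE_JUMP. */
	};
};

static void
example_fate_release(struct example_handle *h)
{
	switch (h->fate) {
	case EXAMPLE_FATE_QUEUE:
		/* Release the RX hash queue referenced by h->rix_hrxq. */
		h->rix_hrxq = 0;
		break;
	case EXAMPLE_FATE_JUMP:
		/* Release the jump table referenced by h->rix_jump. */
		h->rix_jump = 0;
		break;
	}
}
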
13914 void
13915 flow_dv_sample_remove_cb(struct mlx5_list *list __rte_unused,
13916                          struct mlx5_list_entry *entry)
13917 {
13918         struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
13919                                                               typeof(*resource),
13920                                                               entry);
13921         struct rte_eth_dev *dev = resource->dev;
13922         struct mlx5_priv *priv = dev->data->dev_private;
13923
13924         if (resource->verbs_action)
13925                 claim_zero(mlx5_flow_os_destroy_flow_action
13926                                                       (resource->verbs_action));
13927         if (resource->normal_path_tbl)
13928                 flow_dv_tbl_resource_release(MLX5_SH(dev),
13929                                              resource->normal_path_tbl);
13930         flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
13931         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
13932         DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
13933 }
13934
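/*
 * Editor's note: flow_dv_sample_remove_cb() above tears down in reverse
 * dependency order: the action that references the table first, then the
 * table, then the sub-actions, and finally the pool slot. A hypothetical
 * skeleton of that ordering (example_* types are illustrative):
 */
struct example_sample_res {
	void *action;      /* References tbl; must be destroyed first. */
	void *tbl;         /* Referenced by action; destroyed second. */
	uint32_t sub_idx;  /* Sub-action indexes; released third. */
};

static void
example_sample_destroy(struct example_sample_res *r)
{
	if (r->action) {
		/* Destroy the HW action before the table it points at. */
		r->action = NULL;
	}
	if (r->tbl) {
		/* Release the normal-path table. */
		r->tbl = NULL;
	}
	/* Release sub-actions by index, then free the pool slot itself. */
	(void)r->sub_idx;
}
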
13935 /**
13936  * Release a sample resource.
13937  *
13938  * @param dev
13939  *   Pointer to Ethernet device.
13940  * @param handle
13941  *   Pointer to mlx5_flow_handle.
13942  *
13943  * @return
13944  *   1 while a reference on it exists, 0 when freed.
13945  */
13946 static int
13947 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
13948                                      struct mlx5_flow_handle *handle)
13949 {
13950         struct mlx5_priv *priv = dev->data->dev_private;
13951         struct mlx5_flow_dv_sample_resource *resource;
13952
13953         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
13954                                   handle->dvh.rix_sample);
13955         if (!resource)
13956                 return 0;
13957         MLX5_ASSERT(resource->verbs_action);
13958         return mlx5_list_unregister(&priv->sh->sample_action_list,
13959                                     &resource->entry);
13960 }
13961
13962 void
13963 flow_dv_dest_array_remove_cb(struct mlx5_list *list __rte_unused,
13964                              struct mlx5_list_entry *entry)
13965 {
13966         struct mlx5_flow_dv_dest_array_resource *resource =
13967                         container_of(entry, typeof(*resource), entry);
13968         struct rte_eth_dev *dev = resource->dev;
13969         struct mlx5_priv *priv = dev->data->dev_private;
13970         uint32_t i = 0;
13971
13972         MLX5_ASSERT(resource->action);
13973         if (resource->action)
13974                 claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
13975         for (; i < resource->num_of_dest; i++)
13976                 flow_dv_sample_sub_actions_release(dev,
13977                                                    &resource->sample_idx[i]);
13978         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
13979         DRV_LOG(DEBUG, "destination array resource %p: removed",
13980                 (void *)resource);
13981 }
13982
13983 /**
13984  * Release a destination array resource.
13985  *
13986  * @param dev
13987  *   Pointer to Ethernet device.
13988  * @param handle
13989  *   Pointer to mlx5_flow_handle.
13990  *
13991  * @return
13992  *   1 while a reference on it exists, 0 when freed.
13993  */
13994 static int
13995 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
13996                                     struct mlx5_flow_handle *handle)
13997 {
13998         struct mlx5_priv *priv = dev->data->dev_private;
13999         struct mlx5_flow_dv_dest_array_resource *resource;
14000
14001         resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
14002                                   handle->dvh.rix_dest_array);
14003         if (!resource)
14004                 return 0;
14005         MLX5_ASSERT(resource->action);
14006         return mlx5_list_unregister(&priv->sh->dest_array_list,
14007                                     &resource->entry);
14008 }
14009
14010 static void
14011 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
14012 {
14013         struct mlx5_priv *priv = dev->data->dev_private;
14014         struct mlx5_dev_ctx_shared *sh = priv->sh;
14015         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
14016                                 sh->geneve_tlv_option_resource;
14017         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
14018         if (geneve_opt_resource) {
14019                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
14020                                          __ATOMIC_RELAXED))) {
14021                         claim_zero(mlx5_devx_cmd_destroy
14022                                         (geneve_opt_resource->obj));
14023                         mlx5_free(sh->geneve_tlv_option_resource);
14024                         sh->geneve_tlv_option_resource = NULL;
14025                 }
14026         }
14027         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
14028 }
14029
14030 /**
14031  * Remove the flow from the NIC but keep it in memory.
14032  * Lock free, (mutex should be acquired by caller).
14033  *
14034  * @param[in] dev
14035  *   Pointer to Ethernet device.
14036  * @param[in, out] flow
14037  *   Pointer to flow structure.
14038  */
14039 static void
14040 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
14041 {
14042         struct mlx5_flow_handle *dh;
14043         uint32_t handle_idx;
14044         struct mlx5_priv *priv = dev->data->dev_private;
14045
14046         if (!flow)
14047                 return;
14048         handle_idx = flow->dev_handles;
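        /*
         * Walk the singly linked list of device handles (ipool indices);
         * each handle owns one hardware rule to be destroyed.
         */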
14049         while (handle_idx) {
14050                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14051                                     handle_idx);
14052                 if (!dh)
14053                         return;
14054                 if (dh->drv_flow) {
14055                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
14056                         dh->drv_flow = NULL;
14057                 }
14058                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
14059                         flow_dv_fate_resource_release(dev, dh);
14060                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
14061                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
14062                 handle_idx = dh->next.next;
14063         }
14064 }
14065
14066 /**
14067  * Remove the flow from the NIC and the memory.
14068  * Lock free, (mutex should be acquired by caller).
14069  *
14070  * @param[in] dev
14071  *   Pointer to the Ethernet device structure.
14072  * @param[in, out] flow
14073  *   Pointer to flow structure.
14074  */
14075 static void
14076 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
14077 {
14078         struct mlx5_flow_handle *dev_handle;
14079         struct mlx5_priv *priv = dev->data->dev_private;
14080         struct mlx5_flow_meter_info *fm = NULL;
14081         uint32_t srss = 0;
14082
14083         if (!flow)
14084                 return;
14085         flow_dv_remove(dev, flow);
14086         if (flow->counter) {
14087                 flow_dv_counter_free(dev, flow->counter);
14088                 flow->counter = 0;
14089         }
14090         if (flow->meter) {
14091                 fm = flow_dv_meter_find_by_idx(priv, flow->meter);
14092                 if (fm)
14093                         mlx5_flow_meter_detach(priv, fm);
14094                 flow->meter = 0;
14095         }
14096         /* Keep the current age handling by default. */
14097         if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
14098                 flow_dv_aso_ct_release(dev, flow->ct);
14099         else if (flow->age)
14100                 flow_dv_aso_age_release(dev, flow->age);
14101         if (flow->geneve_tlv_option) {
14102                 flow_dv_geneve_tlv_option_resource_release(dev);
14103                 flow->geneve_tlv_option = 0;
14104         }
14105         while (flow->dev_handles) {
14106                 uint32_t tmp_idx = flow->dev_handles;
14107
14108                 dev_handle = mlx5_ipool_get(priv->sh->ipool
14109                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
14110                 if (!dev_handle)
14111                         return;
14112                 flow->dev_handles = dev_handle->next.next;
14113                 if (dev_handle->dvh.matcher)
14114                         flow_dv_matcher_release(dev, dev_handle);
14115                 if (dev_handle->dvh.rix_sample)
14116                         flow_dv_sample_resource_release(dev, dev_handle);
14117                 if (dev_handle->dvh.rix_dest_array)
14118                         flow_dv_dest_array_resource_release(dev, dev_handle);
14119                 if (dev_handle->dvh.rix_encap_decap)
14120                         flow_dv_encap_decap_resource_release(dev,
14121                                 dev_handle->dvh.rix_encap_decap);
14122                 if (dev_handle->dvh.modify_hdr)
14123                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
14124                 if (dev_handle->dvh.rix_push_vlan)
14125                         flow_dv_push_vlan_action_resource_release(dev,
14126                                                                   dev_handle);
14127                 if (dev_handle->dvh.rix_tag)
14128                         flow_dv_tag_release(dev,
14129                                             dev_handle->dvh.rix_tag);
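                /*
                 * A shared RSS fate is released only once, after the loop,
                 * since several handles may reference the same action.
                 */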
14130                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
14131                         flow_dv_fate_resource_release(dev, dev_handle);
14132                 else if (!srss)
14133                         srss = dev_handle->rix_srss;
14134                 if (fm && dev_handle->is_meter_flow_id &&
14135                     dev_handle->split_flow_id)
14136                         mlx5_ipool_free(fm->flow_ipool,
14137                                         dev_handle->split_flow_id);
14138                 else if (dev_handle->split_flow_id &&
14139                     !dev_handle->is_meter_flow_id)
14140                         mlx5_ipool_free(priv->sh->ipool
14141                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
14142                                         dev_handle->split_flow_id);
14143                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
14144                            tmp_idx);
14145         }
14146         if (srss)
14147                 flow_dv_shared_rss_action_release(dev, srss);
14148 }
14149
14150 /**
14151  * Release array of hash RX queue objects.
14152  * Helper function.
14153  *
14154  * @param[in] dev
14155  *   Pointer to the Ethernet device structure.
14156  * @param[in, out] hrxqs
14157  *   Array of hash RX queue objects.
14158  *
14159  * @return
14160  *   Total number of references to hash RX queue objects in *hrxqs* array
14161  *   after this operation.
14162  */
14163 static int
14164 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
14165                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
14166 {
14167         size_t i;
14168         int remaining = 0;
14169
14170         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
14171                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
14172
14173                 if (!ret)
14174                         (*hrxqs)[i] = 0;
14175                 remaining += ret;
14176         }
14177         return remaining;
14178 }
14179
14180 /**
14181  * Release all hash RX queue objects representing shared RSS action.
14182  *
14183  * @param[in] dev
14184  *   Pointer to the Ethernet device structure.
14185  * @param[in, out] shared_rss
14186  *   Shared RSS action to remove hash RX queue objects from.
14187  *
14188  * @return
14189  *   Total number of references to hash RX queue objects stored in *shared_rss*
14190  *   after this operation.
14191  *   Expected to be 0 if no external references held.
14192  */
14193 static int
14194 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
14195                                  struct mlx5_shared_action_rss *shared_rss)
14196 {
14197         return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
14198 }
14199
14200 /**
14201  * Adjust the L3/L4 hash value of a pre-created shared RSS hrxq according
14202  * to the user input.
14203  *
14204  * Only one hash value is available for one L3+L4 combination.
14205  * For example:
14206  * MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY, and
14207  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
14208  * the same slot in mlx5_rss_hash_fields.
14209  *
14210  * @param[in] rss
14211  *   Pointer to the shared action RSS conf.
14212  * @param[in, out] hash_field
14213  *   hash_field value to be adjusted.
14214  *
14215  * @return
14216  *   void
14217  */
14218 static void
14219 __flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
14220                                      uint64_t *hash_field)
14221 {
14222         uint64_t rss_types = rss->origin.types;
14223
14224         switch (*hash_field & ~IBV_RX_HASH_INNER) {
14225         case MLX5_RSS_HASH_IPV4:
14226                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
14227                         *hash_field &= ~MLX5_RSS_HASH_IPV4;
14228                         if (rss_types & ETH_RSS_L3_DST_ONLY)
14229                                 *hash_field |= IBV_RX_HASH_DST_IPV4;
14230                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
14231                                 *hash_field |= IBV_RX_HASH_SRC_IPV4;
14232                         else
14233                                 *hash_field |= MLX5_RSS_HASH_IPV4;
14234                 }
14235                 return;
14236         case MLX5_RSS_HASH_IPV6:
14237                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
14238                         *hash_field &= ~MLX5_RSS_HASH_IPV6;
14239                         if (rss_types & ETH_RSS_L3_DST_ONLY)
14240                                 *hash_field |= IBV_RX_HASH_DST_IPV6;
14241                         else if (rss_types & ETH_RSS_L3_SRC_ONLY)
14242                                 *hash_field |= IBV_RX_HASH_SRC_IPV6;
14243                         else
14244                                 *hash_field |= MLX5_RSS_HASH_IPV6;
14245                 }
14246                 return;
14247         case MLX5_RSS_HASH_IPV4_UDP:
14248                 /* fall-through. */
14249         case MLX5_RSS_HASH_IPV6_UDP:
14250                 if (rss_types & ETH_RSS_UDP) {
14251                         *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
14252                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14253                                 *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
14254                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14255                                 *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
14256                         else
14257                                 *hash_field |= MLX5_UDP_IBV_RX_HASH;
14258                 }
14259                 return;
14260         case MLX5_RSS_HASH_IPV4_TCP:
14261                 /* fall-through. */
14262         case MLX5_RSS_HASH_IPV6_TCP:
14263                 if (rss_types & ETH_RSS_TCP) {
14264                         *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
14265                         if (rss_types & ETH_RSS_L4_DST_ONLY)
14266                                 *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
14267                         else if (rss_types & ETH_RSS_L4_SRC_ONLY)
14268                                 *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
14269                         else
14270                                 *hash_field |= MLX5_TCP_IBV_RX_HASH;
14271                 }
14272                 return;
14273         default:
14274                 return;
14275         }
14276 }
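/*
 * Example: with rss->origin.types = ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY the
 * pre-created MLX5_RSS_HASH_IPV4 slot (source + destination hash) above is
 * narrowed down to IBV_RX_HASH_SRC_IPV4 only.
 */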
14277
14278 /**
14279  * Setup shared RSS action.
14280  * Prepare a set of hash RX queue objects sufficient to handle all valid
14281  * hash_fields combinations (see enum ibv_rx_hash_fields).
14282  *
14283  * @param[in] dev
14284  *   Pointer to the Ethernet device structure.
14285  * @param[in] action_idx
14286  *   Shared RSS action ipool index.
14287  * @param[in, out] action
14288  *   Partially initialized shared RSS action.
14289  * @param[out] error
14290  *   Perform verbose error reporting if not NULL. Initialized in case of
14291  *   error only.
14292  *
14293  * @return
14294  *   0 on success, otherwise negative errno value.
14295  */
14296 static int
14297 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
14298                            uint32_t action_idx,
14299                            struct mlx5_shared_action_rss *shared_rss,
14300                            struct rte_flow_error *error)
14301 {
14302         struct mlx5_flow_rss_desc rss_desc = { 0 };
14303         size_t i;
14304         int err;
14305
14306         if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
14307                 return rte_flow_error_set(error, rte_errno,
14308                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14309                                           "cannot setup indirection table");
14310         }
14311         memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
14312         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
14313         rss_desc.const_q = shared_rss->origin.queue;
14314         rss_desc.queue_num = shared_rss->origin.queue_num;
14315         /* Set non-zero value to indicate a shared RSS. */
14316         rss_desc.shared_rss = action_idx;
14317         rss_desc.ind_tbl = shared_rss->ind_tbl;
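        /*
         * Pre-create one hash RX queue object per supported hash_fields
         * combination so that flow insertion can later pick up a ready
         * hrxq without allocating on the datapath.
         */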
14318         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
14319                 uint32_t hrxq_idx;
14320                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
14321                 int tunnel = 0;
14322
14323                 __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
14324                 if (shared_rss->origin.level > 1) {
14325                         hash_fields |= IBV_RX_HASH_INNER;
14326                         tunnel = 1;
14327                 }
14328                 rss_desc.tunnel = tunnel;
14329                 rss_desc.hash_fields = hash_fields;
14330                 hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
14331                 if (!hrxq_idx) {
14332                         rte_flow_error_set
14333                                 (error, rte_errno,
14334                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14335                                  "cannot get hash queue");
14336                         goto error_hrxq_new;
14337                 }
14338                 err = __flow_dv_action_rss_hrxq_set
14339                         (shared_rss, hash_fields, hrxq_idx);
14340                 MLX5_ASSERT(!err);
14341         }
14342         return 0;
14343 error_hrxq_new:
14344         err = rte_errno;
14345         __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14346         if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
14347                 shared_rss->ind_tbl = NULL;
14348         rte_errno = err;
14349         return -rte_errno;
14350 }
14351
14352 /**
14353  * Create shared RSS action.
14354  *
14355  * @param[in] dev
14356  *   Pointer to the Ethernet device structure.
14357  * @param[in] conf
14358  *   Shared action configuration.
14359  * @param[in] rss
14360  *   RSS action specification used to create shared action.
14361  * @param[out] error
14362  *   Perform verbose error reporting if not NULL. Initialized in case of
14363  *   error only.
14364  *
14365  * @return
14366  *   A valid shared action ID in case of success, 0 otherwise and
14367  *   rte_errno is set.
14368  */
14369 static uint32_t
14370 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
14371                             const struct rte_flow_indir_action_conf *conf,
14372                             const struct rte_flow_action_rss *rss,
14373                             struct rte_flow_error *error)
14374 {
14375         struct mlx5_priv *priv = dev->data->dev_private;
14376         struct mlx5_shared_action_rss *shared_rss = NULL;
14377         void *queue = NULL;
14378         struct rte_flow_action_rss *origin;
14379         const uint8_t *rss_key;
14380         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
14381         uint32_t idx;
14382
14383         RTE_SET_USED(conf);
14384         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14385                             0, SOCKET_ID_ANY);
14386         shared_rss = mlx5_ipool_zmalloc
14387                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
14388         if (!shared_rss || !queue) {
14389                 rte_flow_error_set(error, ENOMEM,
14390                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14391                                    "cannot allocate resource memory");
14392                 goto error_rss_init;
14393         }
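        /*
         * The ipool index is packed into the low bits of the indirect action
         * handle while the action type occupies the bits above
         * MLX5_INDIRECT_ACTION_TYPE_OFFSET, so the index must fit below that
         * offset.
         */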
14394         if (idx >= (1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET)) {
14395                 rte_flow_error_set(error, E2BIG,
14396                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14397                                    "rss action number out of range");
14398                 goto error_rss_init;
14399         }
14400         shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
14401                                           sizeof(*shared_rss->ind_tbl),
14402                                           0, SOCKET_ID_ANY);
14403         if (!shared_rss->ind_tbl) {
14404                 rte_flow_error_set(error, ENOMEM,
14405                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14406                                    "cannot allocate resource memory");
14407                 goto error_rss_init;
14408         }
14409         memcpy(queue, rss->queue, queue_size);
14410         shared_rss->ind_tbl->queues = queue;
14411         shared_rss->ind_tbl->queues_n = rss->queue_num;
14412         origin = &shared_rss->origin;
14413         origin->func = rss->func;
14414         origin->level = rss->level;
14415         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
14416         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
14417         /* NULL RSS key indicates default RSS key. */
14418         rss_key = !rss->key ? rss_hash_default_key : rss->key;
14419         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
14420         origin->key = &shared_rss->key[0];
14421         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
14422         origin->queue = queue;
14423         origin->queue_num = rss->queue_num;
14424         if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
14425                 goto error_rss_init;
14426         rte_spinlock_init(&shared_rss->action_rss_sl);
14427         __atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
14428         rte_spinlock_lock(&priv->shared_act_sl);
14429         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14430                      &priv->rss_shared_actions, idx, shared_rss, next);
14431         rte_spinlock_unlock(&priv->shared_act_sl);
14432         return idx;
14433 error_rss_init:
14434         if (shared_rss) {
14435                 if (shared_rss->ind_tbl)
14436                         mlx5_free(shared_rss->ind_tbl);
14437                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14438                                 idx);
14439         }
14440         if (queue)
14441                 mlx5_free(queue);
14442         return 0;
14443 }
14444
14445 /**
14446  * Destroy the shared RSS action.
14447  * Release related hash RX queue objects.
14448  *
14449  * @param[in] dev
14450  *   Pointer to the Ethernet device structure.
14451  * @param[in] idx
14452  *   The shared RSS action object ID to be removed.
14453  * @param[out] error
14454  *   Perform verbose error reporting if not NULL. Initialized in case of
14455  *   error only.
14456  *
14457  * @return
14458  *   0 on success, otherwise negative errno value.
14459  */
14460 static int
14461 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
14462                              struct rte_flow_error *error)
14463 {
14464         struct mlx5_priv *priv = dev->data->dev_private;
14465         struct mlx5_shared_action_rss *shared_rss =
14466             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14467         uint32_t old_refcnt = 1;
14468         int remaining;
14469         uint16_t *queue = NULL;
14470
14471         if (!shared_rss)
14472                 return rte_flow_error_set(error, EINVAL,
14473                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14474                                           "invalid shared action");
14475         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
14476         if (remaining)
14477                 return rte_flow_error_set(error, EBUSY,
14478                                           RTE_FLOW_ERROR_TYPE_ACTION,
14479                                           NULL,
14480                                           "shared rss hrxq has references");
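        /*
         * Atomically drop the creation reference: the CAS succeeds only if
         * refcnt is exactly 1, i.e. no flow references this action anymore.
         */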
14481         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
14482                                          0, 0, __ATOMIC_ACQUIRE,
14483                                          __ATOMIC_RELAXED))
14484                 return rte_flow_error_set(error, EBUSY,
14485                                           RTE_FLOW_ERROR_TYPE_ACTION,
14486                                           NULL,
14487                                           "shared rss has references");
14488         queue = shared_rss->ind_tbl->queues;
14489         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
14490         if (remaining)
14491                 return rte_flow_error_set(error, EBUSY,
14492                                           RTE_FLOW_ERROR_TYPE_ACTION,
14493                                           NULL,
14494                                           "shared rss indirection table has"
14495                                           " references");
14496         mlx5_free(queue);
14497         rte_spinlock_lock(&priv->shared_act_sl);
14498         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14499                      &priv->rss_shared_actions, idx, shared_rss, next);
14500         rte_spinlock_unlock(&priv->shared_act_sl);
14501         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
14502                         idx);
14503         return 0;
14504 }
14505
14506 /**
14507  * Create indirect action, lock free,
14508  * (mutex should be acquired by caller).
14509  * Dispatcher for action type specific call.
14510  *
14511  * @param[in] dev
14512  *   Pointer to the Ethernet device structure.
14513  * @param[in] conf
14514  *   Shared action configuration.
14515  * @param[in] action
14516  *   Action specification used to create indirect action.
14517  * @param[out] err
14518  *   Perform verbose error reporting if not NULL. Initialized in case of
14519  *   error only.
14520  *
14521  * @return
14522  *   A valid shared action handle in case of success, NULL otherwise and
14523  *   rte_errno is set.
14524  */
14525 static struct rte_flow_action_handle *
14526 flow_dv_action_create(struct rte_eth_dev *dev,
14527                       const struct rte_flow_indir_action_conf *conf,
14528                       const struct rte_flow_action *action,
14529                       struct rte_flow_error *err)
14530 {
14531         struct mlx5_priv *priv = dev->data->dev_private;
14532         uint32_t age_idx = 0;
14533         uint32_t idx = 0;
14534         uint32_t ret = 0;
14535
14536         switch (action->type) {
14537         case RTE_FLOW_ACTION_TYPE_RSS:
14538                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
14539                 idx = (MLX5_INDIRECT_ACTION_TYPE_RSS <<
14540                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14541                 break;
14542         case RTE_FLOW_ACTION_TYPE_AGE:
14543                 age_idx = flow_dv_aso_age_alloc(dev, err);
14544                 if (!age_idx) {
14545                         ret = -rte_errno;
14546                         break;
14547                 }
14548                 idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
14549                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
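                /*
                 * If the user supplied no context, store the packed handle
                 * value itself so that aging events can identify the action.
                 */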
14550                 flow_dv_aso_age_params_init(dev, age_idx,
14551                                         ((const struct rte_flow_action_age *)
14552                                                 action->conf)->context ?
14553                                         ((const struct rte_flow_action_age *)
14554                                                 action->conf)->context :
14555                                         (void *)(uintptr_t)idx,
14556                                         ((const struct rte_flow_action_age *)
14557                                                 action->conf)->timeout);
14558                 ret = age_idx;
14559                 break;
14560         case RTE_FLOW_ACTION_TYPE_COUNT:
14561                 ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
14562                 idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
14563                        MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
14564                 break;
14565         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
14566                 ret = flow_dv_translate_create_conntrack(dev, action->conf,
14567                                                          err);
14568                 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
14569                 break;
14570         default:
14571                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14572                                    NULL, "action type not supported");
14573                 break;
14574         }
14575         return ret ? (struct rte_flow_action_handle *)(uintptr_t)idx : NULL;
14576 }
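/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * reaches this dispatcher through the generic rte_flow indirect action API,
 * for example:
 *
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = NULL,
 *	};
 *	struct rte_flow_error flow_err;
 *	struct rte_flow_action_handle *h =
 *		rte_flow_action_handle_create(port_id, NULL, &action, &flow_err);
 *
 * The returned pointer is the packed
 * (type << MLX5_INDIRECT_ACTION_TYPE_OFFSET | index) value built above,
 * cast to an opaque handle.
 */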
14577
14578 /**
14579  * Destroy the indirect action.
14580  * Release action related resources on the NIC and the memory.
14581  * Lock free, (mutex should be acquired by caller).
14582  * Dispatcher for action type specific call.
14583  *
14584  * @param[in] dev
14585  *   Pointer to the Ethernet device structure.
14586  * @param[in] handle
14587  *   The indirect action object handle to be removed.
14588  * @param[out] error
14589  *   Perform verbose error reporting if not NULL. Initialized in case of
14590  *   error only.
14591  *
14592  * @return
14593  *   0 on success, otherwise negative errno value.
14594  */
14595 static int
14596 flow_dv_action_destroy(struct rte_eth_dev *dev,
14597                        struct rte_flow_action_handle *handle,
14598                        struct rte_flow_error *error)
14599 {
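        /*
         * Decode the handle: the action type is stored in the bits above
         * MLX5_INDIRECT_ACTION_TYPE_OFFSET, the object index in the bits
         * below it.
         */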
14600         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14601         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14602         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14603         struct mlx5_flow_counter *cnt;
14604         uint32_t no_flow_refcnt = 1;
14605         int ret;
14606
14607         switch (type) {
14608         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14609                 return __flow_dv_action_rss_release(dev, idx, error);
14610         case MLX5_INDIRECT_ACTION_TYPE_COUNT:
14611                 cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
14612                 if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
14613                                                  &no_flow_refcnt, 1, false,
14614                                                  __ATOMIC_ACQUIRE,
14615                                                  __ATOMIC_RELAXED))
14616                         return rte_flow_error_set(error, EBUSY,
14617                                                   RTE_FLOW_ERROR_TYPE_ACTION,
14618                                                   NULL,
14619                                                   "Indirect count action has references");
14620                 flow_dv_counter_free(dev, idx);
14621                 return 0;
14622         case MLX5_INDIRECT_ACTION_TYPE_AGE:
14623                 ret = flow_dv_aso_age_release(dev, idx);
14624                 if (ret)
14625                         /*
14626                          * In this case, the last flow holding the reference
14627                          * will actually release the age action.
14628                          */
14629                         DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
14630                                 " released with references %d.", idx, ret);
14631                 return 0;
14632         case MLX5_INDIRECT_ACTION_TYPE_CT:
14633                 ret = flow_dv_aso_ct_release(dev, idx);
14634                 if (ret < 0)
14635                         return ret;
14636                 if (ret > 0)
14637                         DRV_LOG(DEBUG, "Connection tracking object %u still "
14638                                 "has references %d.", idx, ret);
14639                 return 0;
14640         default:
14641                 return rte_flow_error_set(error, ENOTSUP,
14642                                           RTE_FLOW_ERROR_TYPE_ACTION,
14643                                           NULL,
14644                                           "action type not supported");
14645         }
14646 }
14647
14648 /**
14649  * Updates in place shared RSS action configuration.
14650  *
14651  * @param[in] dev
14652  *   Pointer to the Ethernet device structure.
14653  * @param[in] idx
14654  *   The shared RSS action object ID to be updated.
14655  * @param[in] action_conf
14656  *   RSS action specification used to modify *shared_rss*.
14657  * @param[out] error
14658  *   Perform verbose error reporting if not NULL. Initialized in case of
14659  *   error only.
14660  *
14661  * @return
14662  *   0 on success, otherwise negative errno value.
14663  * @note Currently only update of the RSS queues is supported.
14664  */
14665 static int
14666 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
14667                             const struct rte_flow_action_rss *action_conf,
14668                             struct rte_flow_error *error)
14669 {
14670         struct mlx5_priv *priv = dev->data->dev_private;
14671         struct mlx5_shared_action_rss *shared_rss =
14672             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
14673         int ret = 0;
14674         void *queue = NULL;
14675         uint16_t *queue_old = NULL;
14676         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
14677
14678         if (!shared_rss)
14679                 return rte_flow_error_set(error, EINVAL,
14680                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14681                                           "invalid shared action to update");
14682         if (priv->obj_ops.ind_table_modify == NULL)
14683                 return rte_flow_error_set(error, ENOTSUP,
14684                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14685                                           "cannot modify indirection table");
14686         queue = mlx5_malloc(MLX5_MEM_ZERO,
14687                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
14688                             0, SOCKET_ID_ANY);
14689         if (!queue)
14690                 return rte_flow_error_set(error, ENOMEM,
14691                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14692                                           NULL,
14693                                           "cannot allocate resource memory");
14694         memcpy(queue, action_conf->queue, queue_size);
14695         MLX5_ASSERT(shared_rss->ind_tbl);
14696         rte_spinlock_lock(&shared_rss->action_rss_sl);
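        /*
         * Swap the queue array under the action lock; the old array is freed
         * only after the indirection table was modified successfully.
         */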
14697         queue_old = shared_rss->ind_tbl->queues;
14698         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
14699                                         queue, action_conf->queue_num, true);
14700         if (ret) {
14701                 mlx5_free(queue);
14702                 ret = rte_flow_error_set(error, rte_errno,
14703                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
14704                                           "cannot update indirection table");
14705         } else {
14706                 mlx5_free(queue_old);
14707                 shared_rss->origin.queue = queue;
14708                 shared_rss->origin.queue_num = action_conf->queue_num;
14709         }
14710         rte_spinlock_unlock(&shared_rss->action_rss_sl);
14711         return ret;
14712 }
14713
14714 /**
14715  * Updates in place the conntrack context or direction.
14716  * Context update should be synchronized.
14717  *
14718  * @param[in] dev
14719  *   Pointer to the Ethernet device structure.
14720  * @param[in] idx
14721  *   The conntrack object ID to be updated.
14722  * @param[in] update
14723  *   Pointer to the structure of information to update.
14724  * @param[out] error
14725  *   Perform verbose error reporting if not NULL. Initialized in case of
14726  *   error only.
14727  *
14728  * @return
14729  *   0 on success, otherwise negative errno value.
14730  */
14731 static int
14732 __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
14733                            const struct rte_flow_modify_conntrack *update,
14734                            struct rte_flow_error *error)
14735 {
14736         struct mlx5_priv *priv = dev->data->dev_private;
14737         struct mlx5_aso_ct_action *ct;
14738         const struct rte_flow_action_conntrack *new_prf;
14739         int ret = 0;
14740         uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
14741         uint32_t dev_idx;
14742
14743         if (PORT_ID(priv) != owner)
14744                 return rte_flow_error_set(error, EACCES,
14745                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14746                                           NULL,
14747                                           "CT object owned by another port");
14748         dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
14749         ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
14750         if (!ct->refcnt)
14751                 return rte_flow_error_set(error, ENOMEM,
14752                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14753                                           NULL,
14754                                           "CT object is inactive");
14755         new_prf = &update->new_ct;
14756         if (update->direction)
14757                 ct->is_original = !!new_prf->is_original_dir;
14758         if (update->state) {
14759                 /* Only validate the profile when it needs to be updated. */
14760                 ret = mlx5_validate_action_ct(dev, new_prf, error);
14761                 if (ret)
14762                         return ret;
14763                 ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
14764                 if (ret)
14765                         return rte_flow_error_set(error, EIO,
14766                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14767                                         NULL,
14768                                         "Failed to send CT context update WQE");
14769                 /* Block until ready or a failure. */
14770                 ret = mlx5_aso_ct_available(priv->sh, ct);
14771                 if (ret)
14772                         rte_flow_error_set(error, rte_errno,
14773                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14774                                            NULL,
14775                                            "Timeout waiting for the CT update");
14776         }
14777         return ret;
14778 }
14779
14780 /**
14781  * Updates in place shared action configuration, lock free,
14782  * (mutex should be acquired by caller).
14783  *
14784  * @param[in] dev
14785  *   Pointer to the Ethernet device structure.
14786  * @param[in] handle
14787  *   The indirect action object handle to be updated.
14788  * @param[in] update
14789  *   Action specification used to modify the action pointed to by *handle*.
14790  *   *update* can be of the same type as the action pointed to by *handle*,
14791  *   or some other structure like a wrapper, depending on the indirect
14792  *   action type.
14793  * @param[out] err
14794  *   Perform verbose error reporting if not NULL. Initialized in case of
14795  *   error only.
14796  *
14797  * @return
14798  *   0 on success, otherwise negative errno value.
14799  */
14800 static int
14801 flow_dv_action_update(struct rte_eth_dev *dev,
14802                         struct rte_flow_action_handle *handle,
14803                         const void *update,
14804                         struct rte_flow_error *err)
14805 {
14806         uint32_t act_idx = (uint32_t)(uintptr_t)handle;
14807         uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
14808         uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
14809         const void *action_conf;
14810
14811         switch (type) {
14812         case MLX5_INDIRECT_ACTION_TYPE_RSS:
14813                 action_conf = ((const struct rte_flow_action *)update)->conf;
14814                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
14815         case MLX5_INDIRECT_ACTION_TYPE_CT:
14816                 return __flow_dv_action_ct_update(dev, idx, update, err);
14817         default:
14818                 return rte_flow_error_set(err, ENOTSUP,
14819                                           RTE_FLOW_ERROR_TYPE_ACTION,
14820                                           NULL,
14821                                           "action type update not supported");
14822         }
14823 }
14824
14825 /**
14826  * Destroy the meter sub policy table rules.
14827  * Lock free, (mutex should be acquired by caller).
14828  *
14829  * @param[in] dev
14830  *   Pointer to Ethernet device.
14831  * @param[in] sub_policy
14832  *   Pointer to meter sub policy table.
14833  */
14834 static void
14835 __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
14836                              struct mlx5_flow_meter_sub_policy *sub_policy)
14837 {
14838         struct mlx5_priv *priv = dev->data->dev_private;
14839         struct mlx5_flow_tbl_data_entry *tbl;
14840         struct mlx5_flow_meter_policy *policy = sub_policy->main_policy;
14841         struct mlx5_flow_meter_info *next_fm;
14842         struct mlx5_sub_policy_color_rule *color_rule;
14843         void *tmp;
14844         uint32_t i;
14845
14846         for (i = 0; i < RTE_COLORS; i++) {
14847                 next_fm = NULL;
14848                 if (i == RTE_COLOR_GREEN && policy &&
14849                     policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR)
14850                         next_fm = mlx5_flow_meter_find(priv,
14851                                         policy->act_cnt[i].next_mtr_id, NULL);
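                /*
                 * The safe iterator is required since every color rule is
                 * unlinked and freed inside the loop body.
                 */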
14852                 TAILQ_FOREACH_SAFE(color_rule, &sub_policy->color_rules[i],
14853                                    next_port, tmp) {
14854                         claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
14855                         tbl = container_of(color_rule->matcher->tbl,
14856                                         typeof(*tbl), tbl);
14857                         mlx5_list_unregister(&tbl->matchers,
14858                                                 &color_rule->matcher->entry);
14859                         TAILQ_REMOVE(&sub_policy->color_rules[i],
14860                                         color_rule, next_port);
14861                         mlx5_free(color_rule);
14862                         if (next_fm)
14863                                 mlx5_flow_meter_detach(priv, next_fm);
14864                 }
14865         }
14866         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14867                 if (sub_policy->rix_hrxq[i]) {
14868                         if (policy && !policy->is_hierarchy)
14869                                 mlx5_hrxq_release(dev, sub_policy->rix_hrxq[i]);
14870                         sub_policy->rix_hrxq[i] = 0;
14871                 }
14872                 if (sub_policy->jump_tbl[i]) {
14873                         flow_dv_tbl_resource_release(MLX5_SH(dev),
14874                         sub_policy->jump_tbl[i]);
14875                         sub_policy->jump_tbl[i] = NULL;
14876                 }
14877         }
14878         if (sub_policy->tbl_rsc) {
14879                 flow_dv_tbl_resource_release(MLX5_SH(dev),
14880                         sub_policy->tbl_rsc);
14881                 sub_policy->tbl_rsc = NULL;
14882         }
14883 }
14884
14885 /**
14886  * Destroy policy rules, lock free,
14887  * (mutex should be acquired by caller).
14888  * Destroys the rules of every sub-policy in each meter domain.
14889  *
14890  * @param[in] dev
14891  *   Pointer to the Ethernet device structure.
14892  * @param[in] mtr_policy
14893  *   Meter policy struct.
14894  */
14895 static void
14896 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
14897                       struct mlx5_flow_meter_policy *mtr_policy)
14898 {
14899         uint32_t i, j;
14900         struct mlx5_flow_meter_sub_policy *sub_policy;
14901         uint16_t sub_policy_num;
14902
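        /*
         * sub_policy_num packs one per-domain counter,
         * MLX5_MTR_SUB_POLICY_NUM_SHIFT bits wide each.
         */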
14903         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
14904                 sub_policy_num = (mtr_policy->sub_policy_num >>
14905                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
14906                         MLX5_MTR_SUB_POLICY_NUM_MASK;
14907                 for (j = 0; j < sub_policy_num; j++) {
14908                         sub_policy = mtr_policy->sub_policys[i][j];
14909                         if (sub_policy)
14910                                 __flow_dv_destroy_sub_policy_rules
14911                                                 (dev, sub_policy);
14912                 }
14913         }
14914 }
14915
14916 /**
14917  * Destroy policy action, lock free,
14918  * (mutex should be acquired by caller).
14919  * Dispatcher for action type specific call.
14920  *
14921  * @param[in] dev
14922  *   Pointer to the Ethernet device structure.
14923  * @param[in] mtr_policy
14924  *   Meter policy struct.
14925  */
14926 static void
14927 flow_dv_destroy_mtr_policy_acts(struct rte_eth_dev *dev,
14928                       struct mlx5_flow_meter_policy *mtr_policy)
14929 {
14930         struct rte_flow_action *rss_action;
14931         struct mlx5_flow_handle dev_handle;
14932         uint32_t i, j;
14933
14934         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
14935                 if (mtr_policy->act_cnt[i].rix_mark) {
14936                         flow_dv_tag_release(dev,
14937                                 mtr_policy->act_cnt[i].rix_mark);
14938                         mtr_policy->act_cnt[i].rix_mark = 0;
14939                 }
14940                 if (mtr_policy->act_cnt[i].modify_hdr) {
14941                         dev_handle.dvh.modify_hdr =
14942                                 mtr_policy->act_cnt[i].modify_hdr;
14943                         flow_dv_modify_hdr_resource_release(dev, &dev_handle);
14944                 }
14945                 switch (mtr_policy->act_cnt[i].fate_action) {
14946                 case MLX5_FLOW_FATE_SHARED_RSS:
14947                         rss_action = mtr_policy->act_cnt[i].rss;
14948                         mlx5_free(rss_action);
14949                         break;
14950                 case MLX5_FLOW_FATE_PORT_ID:
14951                         if (mtr_policy->act_cnt[i].rix_port_id_action) {
14952                                 flow_dv_port_id_action_resource_release(dev,
14953                                 mtr_policy->act_cnt[i].rix_port_id_action);
14954                                 mtr_policy->act_cnt[i].rix_port_id_action = 0;
14955                         }
14956                         break;
14957                 case MLX5_FLOW_FATE_DROP:
14958                 case MLX5_FLOW_FATE_JUMP:
14959                         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14960                                 mtr_policy->act_cnt[i].dr_jump_action[j] =
14961                                                 NULL;
14962                         break;
14963                 default:
14964                         /* Queue action: nothing to release. */
14965                         break;
14966                 }
14967         }
14968         for (j = 0; j < MLX5_MTR_DOMAIN_MAX; j++)
14969                 mtr_policy->dr_drop_action[j] = NULL;
14970 }
14971
14972 /**
14973  * Create policy action per domain, lock free,
14974  * (mutex should be acquired by caller).
14975  * Dispatcher for action type specific call.
14976  *
14977  * @param[in] dev
14978  *   Pointer to the Ethernet device structure.
14979  * @param[in] mtr_policy
14980  *   Meter policy struct.
14981  * @param[in] actions
14982  *   Per-color action specifications used to create the meter actions.
 * @param[in] domain
 *   Meter domain (ingress, egress or transfer) to create the actions in.
14983  * @param[out] error
14984  *   Perform verbose error reporting if not NULL. Initialized in case of
14985  *   error only.
14986  *
14987  * @return
14988  *   0 on success, otherwise negative errno value.
14989  */
14990 static int
14991 __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
14992                         struct mlx5_flow_meter_policy *mtr_policy,
14993                         const struct rte_flow_action *actions[RTE_COLORS],
14994                         enum mlx5_meter_domain domain,
14995                         struct rte_mtr_error *error)
14996 {
14997         struct mlx5_priv *priv = dev->data->dev_private;
14998         struct rte_flow_error flow_err;
14999         const struct rte_flow_action *act;
15000         uint64_t action_flags = 0;
15001         struct mlx5_flow_handle dh;
15002         struct mlx5_flow dev_flow;
15003         struct mlx5_flow_dv_port_id_action_resource port_id_action;
15004         int i, ret;
15005         uint8_t egress, transfer;
15006         struct mlx5_meter_policy_action_container *act_cnt = NULL;
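        /*
         * Scratch modify-header resource sized for the maximum number of
         * modification commands, kept on the stack via this union.
         */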
15007         union {
15008                 struct mlx5_flow_dv_modify_hdr_resource res;
15009                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
15010                             sizeof(struct mlx5_modification_cmd) *
15011                             (MLX5_MAX_MODIFY_NUM + 1)];
15012         } mhdr_dummy;
15013         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
15014
15015         egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
15016         transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
15017         memset(&dh, 0, sizeof(struct mlx5_flow_handle));
15018         memset(&dev_flow, 0, sizeof(struct mlx5_flow));
15019         memset(&port_id_action, 0,
15020                 sizeof(struct mlx5_flow_dv_port_id_action_resource));
15021         memset(mhdr_res, 0, sizeof(*mhdr_res));
15022         mhdr_res->ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
15023                                         egress ?
15024                                         MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
15025                                         MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
15026         dev_flow.handle = &dh;
15027         dev_flow.dv.port_id_action = &port_id_action;
15028         dev_flow.external = true;
15029         for (i = 0; i < RTE_COLORS; i++) {
15030                 if (i < MLX5_MTR_RTE_COLORS)
15031                         act_cnt = &mtr_policy->act_cnt[i];
15032                 for (act = actions[i];
15033                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
15034                         act++) {
15035                         switch (act->type) {
15036                         case RTE_FLOW_ACTION_TYPE_MARK:
15037                         {
15038                                 uint32_t tag_be = mlx5_flow_mark_set
15039                                         (((const struct rte_flow_action_mark *)
15040                                         (act->conf))->id);
15041
15042                                 if (i >= MLX5_MTR_RTE_COLORS)
15043                                         return -rte_mtr_error_set(error,
15044                                           ENOTSUP,
15045                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15046                                           NULL,
15047                                           "cannot create policy "
15048                                           "mark action for this color");
15049                                 dev_flow.handle->mark = 1;
15050                                 if (flow_dv_tag_resource_register(dev, tag_be,
15051                                                   &dev_flow, &flow_err))
15052                                         return -rte_mtr_error_set(error,
15053                                         ENOTSUP,
15054                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15055                                         NULL,
15056                                         "cannot setup policy mark action");
15057                                 MLX5_ASSERT(dev_flow.dv.tag_resource);
15058                                 act_cnt->rix_mark =
15059                                         dev_flow.handle->dvh.rix_tag;
15060                                 action_flags |= MLX5_FLOW_ACTION_MARK;
15061                                 break;
15062                         }
15063                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
15064                                 if (i >= MLX5_MTR_RTE_COLORS)
15065                                         return -rte_mtr_error_set(error,
15066                                           ENOTSUP,
15067                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15068                                           NULL,
15069                                           "cannot create policy "
15070                                           "set tag action for this color");
15071                                 if (flow_dv_convert_action_set_tag
15072                                 (dev, mhdr_res,
15073                                 (const struct rte_flow_action_set_tag *)
15074                                 act->conf,  &flow_err))
15075                                         return -rte_mtr_error_set(error,
15076                                         ENOTSUP,
15077                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15078                                         NULL, "cannot convert policy "
15079                                         "set tag action");
15080                                 if (!mhdr_res->actions_num)
15081                                         return -rte_mtr_error_set(error,
15082                                         ENOTSUP,
15083                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15084                                         NULL, "cannot find policy "
15085                                         "set tag action");
15086                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
15087                                 break;
15088                         case RTE_FLOW_ACTION_TYPE_DROP:
15089                         {
15090                                 struct mlx5_flow_mtr_mng *mtrmng =
15091                                                 priv->sh->mtrmng;
15092                                 struct mlx5_flow_tbl_data_entry *tbl_data;
15093
15094                                 /*
15095                                  * Create the drop table with
15096                                  * METER DROP level.
15097                                  */
15098                                 if (!mtrmng->drop_tbl[domain]) {
15099                                         mtrmng->drop_tbl[domain] =
15100                                         flow_dv_tbl_resource_get(dev,
15101                                         MLX5_FLOW_TABLE_LEVEL_METER,
15102                                         egress, transfer, false, NULL, 0,
15103                                         0, MLX5_MTR_TABLE_ID_DROP, &flow_err);
15104                                         if (!mtrmng->drop_tbl[domain])
15105                                                 return -rte_mtr_error_set
15106                                         (error, ENOTSUP,
15107                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15108                                         NULL,
15109                                         "Failed to create meter drop table");
15110                                 }
15111                                 tbl_data = container_of
15112                                 (mtrmng->drop_tbl[domain],
15113                                 struct mlx5_flow_tbl_data_entry, tbl);
15114                                 if (i < MLX5_MTR_RTE_COLORS) {
15115                                         act_cnt->dr_jump_action[domain] =
15116                                                 tbl_data->jump.action;
15117                                         act_cnt->fate_action =
15118                                                 MLX5_FLOW_FATE_DROP;
15119                                 }
15120                                 if (i == RTE_COLOR_RED)
15121                                         mtr_policy->dr_drop_action[domain] =
15122                                                 tbl_data->jump.action;
15123                                 action_flags |= MLX5_FLOW_ACTION_DROP;
15124                                 break;
15125                         }
15126                         case RTE_FLOW_ACTION_TYPE_QUEUE:
15127                         {
15128                                 if (i >= MLX5_MTR_RTE_COLORS)
15129                                         return -rte_mtr_error_set(error,
15130                                         ENOTSUP,
15131                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
15132                                         NULL, "cannot create policy "
15133                                         "fate queue for this color");
15134                                 act_cnt->queue =
15135                                 ((const struct rte_flow_action_queue *)
15136                                         (act->conf))->index;
15137                                 act_cnt->fate_action =
15138                                         MLX5_FLOW_FATE_QUEUE;
15139                                 dev_flow.handle->fate_action =
15140                                         MLX5_FLOW_FATE_QUEUE;
15141                                 mtr_policy->is_queue = 1;
15142                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
15143                                 break;
15144                         }
15145                         case RTE_FLOW_ACTION_TYPE_RSS:
15146                         {
15147                                 int rss_size;
15148
15149                                 if (i >= MLX5_MTR_RTE_COLORS)
15150                                         return -rte_mtr_error_set(error,
15151                                           ENOTSUP,
15152                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15153                                           NULL,
15154                                           "cannot create policy "
15155                                           "rss action for this color");
15156                                 /*
15157                                  * Save RSS conf into policy struct
15158                                  * for translate stage.
15159                                  */
15160                                 rss_size = (int)rte_flow_conv
15161                                         (RTE_FLOW_CONV_OP_ACTION,
15162                                         NULL, 0, act, &flow_err);
15163                                 if (rss_size <= 0)
15164                                         return -rte_mtr_error_set(error,
15165                                           ENOTSUP,
15166                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15167                                           NULL, "Failed to get the "
15168                                           "rss action struct size");
15169                                 act_cnt->rss = mlx5_malloc(MLX5_MEM_ZERO,
15170                                                 rss_size, 0, SOCKET_ID_ANY);
15171                                 if (!act_cnt->rss)
15172                                         return -rte_mtr_error_set(error,
15173                                           ENOTSUP,
15174                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15175                                           NULL,
15176                                           "Failed to allocate rss action memory");
15177                                 ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION,
15178                                         act_cnt->rss, rss_size,
15179                                         act, &flow_err);
15180                                 if (ret < 0)
15181                                         return -rte_mtr_error_set(error,
15182                                           ENOTSUP,
15183                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
15184                                           NULL, "Fail to save "
15185                                           "rss action into policy struct");
15186                                 act_cnt->fate_action =
15187                                         MLX5_FLOW_FATE_SHARED_RSS;
15188                                 action_flags |= MLX5_FLOW_ACTION_RSS;
15189                                 break;
15190                         }
			case RTE_FLOW_ACTION_TYPE_PORT_ID:
			{
				struct mlx5_flow_dv_port_id_action_resource
					port_id_resource;
				uint32_t port_id = 0;

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL, "cannot create policy "
						"port action for this color");
				memset(&port_id_resource, 0,
					sizeof(port_id_resource));
				if (flow_dv_translate_action_port_id(dev, act,
						&port_id, &flow_err))
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL, "cannot translate "
						"policy port action");
				port_id_resource.port_id = port_id;
				if (flow_dv_port_id_action_resource_register
					(dev, &port_id_resource,
					&dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL, "cannot setup "
						"policy port action");
				act_cnt->rix_port_id_action =
					dev_flow.handle->rix_port_id_action;
				act_cnt->fate_action = MLX5_FLOW_FATE_PORT_ID;
				action_flags |= MLX5_FLOW_ACTION_PORT_ID;
				break;
			}
			case RTE_FLOW_ACTION_TYPE_JUMP:
			{
				uint32_t jump_group = 0;
				uint32_t table = 0;
				struct mlx5_flow_tbl_data_entry *tbl_data;
				struct flow_grp_info grp_info = {
					.external = !!dev_flow.external,
					.transfer = !!transfer,
					.fdb_def_rule = !!priv->fdb_def_rule,
					.std_tbl_fix = 0,
					.skip_scale = dev_flow.skip_scale &
						(1 << MLX5_SCALE_FLOW_GROUP_BIT),
				};
				struct mlx5_flow_meter_sub_policy *sub_policy =
					mtr_policy->sub_policys[domain][0];

				if (i >= MLX5_MTR_RTE_COLORS)
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL, "cannot create policy "
						"jump action for this color");
				jump_group =
					((const struct rte_flow_action_jump *)
						act->conf)->group;
				if (mlx5_flow_group_to_table(dev, NULL,
						jump_group, &table,
						&grp_info, &flow_err))
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL, "cannot setup "
						"policy jump action");
				sub_policy->jump_tbl[i] =
					flow_dv_tbl_resource_get(dev,
						table, egress, transfer,
						!!dev_flow.external,
						NULL, jump_group, 0,
						0, &flow_err);
				if (!sub_policy->jump_tbl[i])
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL,
						"cannot create jump action");
				tbl_data = container_of
					(sub_policy->jump_tbl[i],
					struct mlx5_flow_tbl_data_entry, tbl);
				act_cnt->dr_jump_action[domain] =
					tbl_data->jump.action;
				act_cnt->fate_action = MLX5_FLOW_FATE_JUMP;
				action_flags |= MLX5_FLOW_ACTION_JUMP;
				break;
			}
			case RTE_FLOW_ACTION_TYPE_METER:
			{
				const struct rte_flow_action_meter *mtr;
				struct mlx5_flow_meter_info *next_fm;
				struct mlx5_flow_meter_policy *next_policy;
				struct rte_flow_action tag_action;
				struct mlx5_rte_flow_action_set_tag set_tag;
				uint32_t next_mtr_idx = 0;

				mtr = act->conf;
				next_fm = mlx5_flow_meter_find(priv,
							mtr->mtr_id,
							&next_mtr_idx);
				if (!next_fm)
					return -rte_mtr_error_set(error, EINVAL,
						RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
						"Failed to find next meter.");
				if (next_fm->def_policy)
					return -rte_mtr_error_set(error, EINVAL,
						RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
						"Hierarchy only supports "
						"termination meter.");
				next_policy = mlx5_flow_meter_policy_find(dev,
						next_fm->policy_id, NULL);
				MLX5_ASSERT(next_policy);
				if (next_fm->drop_cnt) {
					/*
					 * The next meter has its own drop
					 * counter; tag packets with the next
					 * meter index so its drop rule can
					 * match and count them.
					 */
					set_tag.id =
						(enum modify_reg)
						mlx5_flow_get_reg_id(dev,
						MLX5_MTR_ID, 0,
						(struct rte_flow_error *)error);
					set_tag.offset = (priv->mtr_reg_share ?
						MLX5_MTR_COLOR_BITS : 0);
					set_tag.length = (priv->mtr_reg_share ?
						MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
						MLX5_REG_BITS);
					set_tag.data = next_mtr_idx;
					tag_action.type =
						(enum rte_flow_action_type)
						MLX5_RTE_FLOW_ACTION_TYPE_TAG;
					tag_action.conf = &set_tag;
					if (flow_dv_convert_action_set_reg
						(mhdr_res, &tag_action,
						(struct rte_flow_error *)error))
						return -rte_errno;
					action_flags |=
						MLX5_FLOW_ACTION_SET_TAG;
				}
				act_cnt->fate_action = MLX5_FLOW_FATE_MTR;
				act_cnt->next_mtr_id = next_fm->meter_id;
				act_cnt->next_sub_policy = NULL;
				mtr_policy->is_hierarchy = 1;
				mtr_policy->dev = next_policy->dev;
				action_flags |=
					MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
				break;
			}
			default:
				return -rte_mtr_error_set(error, ENOTSUP,
					RTE_MTR_ERROR_TYPE_METER_POLICY,
					NULL, "action type not supported");
			}
			if (action_flags & MLX5_FLOW_ACTION_SET_TAG) {
				/* Create the modify header action if needed. */
				dev_flow.dv.group = 1;
				if (flow_dv_modify_hdr_resource_register
					(dev, mhdr_res, &dev_flow, &flow_err))
					return -rte_mtr_error_set(error,
						ENOTSUP,
						RTE_MTR_ERROR_TYPE_METER_POLICY,
						NULL, "cannot register policy "
						"set tag action");
				act_cnt->modify_hdr =
					dev_flow.handle->dvh.modify_hdr;
			}
		}
	}
	return 0;
}
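
/*
 * Illustrative usage sketch (not part of the driver): the per-color actions
 * dispatched above arrive from the application through the generic rte_mtr
 * API. A minimal policy sending green packets to a queue and dropping red
 * ones could be built as follows; port_id, policy_id and the queue index
 * are hypothetical values chosen for the example.
 *
 * @code
 * struct rte_mtr_error mtr_err;
 * const struct rte_flow_action green_acts[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *	  .conf = &(struct rte_flow_action_queue){ .index = 0 } },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * const struct rte_flow_action red_acts[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_mtr_meter_policy_params policy = {
 *	.actions = {
 *		[RTE_COLOR_GREEN] = green_acts,
 *		[RTE_COLOR_YELLOW] = NULL,
 *		[RTE_COLOR_RED] = red_acts,
 *	},
 * };
 *
 * if (rte_mtr_meter_policy_add(port_id, policy_id, &policy, &mtr_err))
 *	printf("cannot add meter policy: %s\n", mtr_err.message);
 * @endcode
 */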

/**
 * Create policy actions per domain, lock free
 * (mutex should be acquired by the caller).
 * Dispatches to the action-type-specific handler per domain.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] mtr_policy
 *   Meter policy struct.
 * @param[in] actions
 *   Per-color action specifications used to create meter actions.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev,
		      struct mlx5_flow_meter_policy *mtr_policy,
		      const struct rte_flow_action *actions[RTE_COLORS],
		      struct rte_mtr_error *error)
{
	int ret, i;
	uint16_t sub_policy_num;

	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
		sub_policy_num = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
		if (sub_policy_num) {
			ret = __flow_dv_create_domain_policy_acts(dev,
				mtr_policy, actions,
				(enum mlx5_meter_domain)i, error);
			if (ret)
				return ret;
		}
	}
	return 0;
}
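
/*
 * Note on the loop above: the per-domain sub-policy counts are packed into
 * the single mtr_policy->sub_policy_num word, MLX5_MTR_SUB_POLICY_NUM_SHIFT
 * bits per domain. A minimal sketch of the packing scheme (illustrative
 * helpers, not part of the driver):
 *
 * @code
 * static inline uint16_t
 * sub_policy_num_get(uint32_t packed, int domain)
 * {
 *	return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 *	       MLX5_MTR_SUB_POLICY_NUM_MASK;
 * }
 *
 * static inline uint32_t
 * sub_policy_num_set(uint32_t packed, int domain, uint16_t num)
 * {
 *	uint32_t shift = MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain;
 *
 *	packed &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK << shift);
 *	return packed | ((num & MLX5_MTR_SUB_POLICY_NUM_MASK) << shift);
 * }
 * @endcode
 */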

/**
 * Query a DV flow rule for its statistics via DevX.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] cnt_idx
 *   Index to the flow counter.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_query_count *qc = data;

	if (!priv->config.devx)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "counters are not supported");
	if (cnt_idx) {
		uint64_t pkts, bytes;
		struct mlx5_flow_counter *cnt;
		int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);

		if (err)
			return rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot read counters");
		cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL);
		/* Report deltas relative to the last reset point. */
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = pkts - cnt->hits;
		qc->bytes = bytes - cnt->bytes;
		if (qc->reset) {
			cnt->hits = pkts;
			cnt->bytes = bytes;
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
}
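
/*
 * Illustrative usage sketch (not part of the driver): an application reads
 * these statistics through the generic rte_flow API. Note the delta
 * semantics above: with .reset = 1 every query returns the hits/bytes
 * accumulated since the previous reset. port_id and flow are hypothetical.
 *
 * @code
 * struct rte_flow_query_count qc = { .reset = 1 };
 * struct rte_flow_error flow_err;
 * const struct rte_flow_action count_action = {
 *	.type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 *
 * if (rte_flow_query(port_id, flow, &count_action, &qc, &flow_err) == 0 &&
 *     qc.hits_set)
 *	printf("flow hits since last query: %" PRIu64 "\n", qc.hits);
 * @endcode
 */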

static int
flow_dv_action_query(struct rte_eth_dev *dev,
		     const struct rte_flow_action_handle *handle, void *data,
		     struct rte_flow_error *error)
{
	struct mlx5_age_param *age_param;
	struct rte_flow_query_age *resp;
	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;
	uint16_t owner;
	uint32_t dev_idx;

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
		resp = data;
		resp->aged = __atomic_load_n(&age_param->state,
					     __ATOMIC_RELAXED) == AGE_TMOUT ?
									1 : 0;
		resp->sec_since_last_hit_valid = !resp->aged;
		if (resp->sec_since_last_hit_valid)
			resp->sec_since_last_hit = __atomic_load_n
			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
		return 0;
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		return flow_dv_query_count(dev, idx, data, error);
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
		if (owner != PORT_ID(priv))
			return rte_flow_error_set(error, EACCES,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object owned by another port");
		dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
		ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
		MLX5_ASSERT(ct);
		if (!ct->refcnt)
			return rte_flow_error_set(error, EFAULT,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object is inactive");
		((struct rte_flow_action_conntrack *)data)->peer_port =
							ct->peer;
		((struct rte_flow_action_conntrack *)data)->is_original_dir =
							ct->is_original;
		if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to query CT context");
		return 0;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "action type query not supported");
	}
}
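
/*
 * Note on the handle decoding above: the indirect action "handle" is not a
 * pointer but an encoded 32-bit value, with the action type stored in the
 * bits at and above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the object index
 * below them. A minimal sketch of the decode (illustrative helper, not
 * part of the driver):
 *
 * @code
 * static inline void
 * indirect_handle_decode(const struct rte_flow_action_handle *handle,
 *			  uint32_t *type, uint32_t *idx)
 * {
 *	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *
 *	*type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *	*idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 * }
 * @endcode
 */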

/**
 * Query a flow rule AGE action for aging information.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
		  void *data, struct rte_flow_error *error)
{
	struct rte_flow_query_age *resp = data;
	struct mlx5_age_param *age_param;

	if (flow->age) {
		struct mlx5_aso_age_action *act =
				     flow_aso_age_get_by_idx(dev, flow->age);

		age_param = &act->age_params;
	} else if (flow->counter) {
		age_param = flow_dv_counter_idx_get_age(dev, flow->counter);

		if (!age_param || !age_param->timeout)
			return rte_flow_error_set
					(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					 NULL, "cannot read age data");
	} else {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "age data not available");
	}
	resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
				     AGE_TMOUT ? 1 : 0;
	resp->sec_since_last_hit_valid = !resp->aged;
	if (resp->sec_since_last_hit_valid)
		resp->sec_since_last_hit = __atomic_load_n
			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
	return 0;
}
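
/*
 * Illustrative usage sketch (not part of the driver): querying the AGE
 * action follows the same rte_flow pattern as the counter query above.
 * port_id and flow are hypothetical.
 *
 * @code
 * struct rte_flow_query_age age_resp;
 * struct rte_flow_error flow_err;
 * const struct rte_flow_action age_action = {
 *	.type = RTE_FLOW_ACTION_TYPE_AGE,
 * };
 *
 * if (rte_flow_query(port_id, flow, &age_action, &age_resp, &flow_err) == 0) {
 *	if (age_resp.aged)
 *		printf("flow aged out\n");
 *	else if (age_resp.sec_since_last_hit_valid)
 *		printf("idle for %u seconds\n", age_resp.sec_since_last_hit);
 * }
 * @endcode
 */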

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_dv_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      const struct rte_flow_action *actions,
	      void *data,
	      struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_query_count(dev, flow->counter, data,
						  error);
			break;
		case RTE_FLOW_ACTION_TYPE_AGE:
			ret = flow_dv_query_age(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}

/**
 * Destroy the meter table set.
 * Lock free (mutex should be acquired by the caller).
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Meter information table.
 */
static void
flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
			 struct mlx5_flow_meter_info *fm)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int i;

	if (!fm || !priv->config.dv_flow_en)
		return;
	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
		if (fm->drop_rule[i]) {
			claim_zero(mlx5_flow_os_destroy_flow(fm->drop_rule[i]));
			fm->drop_rule[i] = NULL;
		}
	}
}

static void
flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
	struct mlx5_flow_tbl_data_entry *tbl;
	int i, j;

	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
		if (mtrmng->def_rule[i]) {
			claim_zero(mlx5_flow_os_destroy_flow
					(mtrmng->def_rule[i]));
			mtrmng->def_rule[i] = NULL;
		}
		if (mtrmng->def_matcher[i]) {
			tbl = container_of(mtrmng->def_matcher[i]->tbl,
					   struct mlx5_flow_tbl_data_entry,
					   tbl);
			mlx5_list_unregister(&tbl->matchers,
					     &mtrmng->def_matcher[i]->entry);
			mtrmng->def_matcher[i] = NULL;
		}
		for (j = 0; j < MLX5_REG_BITS; j++) {
			if (mtrmng->drop_matcher[i][j]) {
				tbl = container_of
					(mtrmng->drop_matcher[i][j]->tbl,
					 struct mlx5_flow_tbl_data_entry,
					 tbl);
				mlx5_list_unregister(&tbl->matchers,
					&mtrmng->drop_matcher[i][j]->entry);
				mtrmng->drop_matcher[i][j] = NULL;
			}
		}
		if (mtrmng->drop_tbl[i]) {
			flow_dv_tbl_resource_release(MLX5_SH(dev),
						     mtrmng->drop_tbl[i]);
			mtrmng->drop_tbl[i] = NULL;
		}
	}
}
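
/*
 * Note on the container_of() pattern used throughout the teardown above:
 * matchers are registered in a generic mlx5_list keyed by an embedded
 * entry/tbl member, and the enclosing structure is recovered from a pointer
 * to that member by subtracting the member offset. A minimal sketch of the
 * idiom (illustrative type, not part of the driver):
 *
 * @code
 * struct example_entry {
 *	int key;
 *	struct mlx5_flow_tbl_resource tbl;
 * };
 *
 * static inline struct example_entry *
 * entry_from_tbl(struct mlx5_flow_tbl_resource *res)
 * {
 *	return container_of(res, struct example_entry, tbl);
 * }
 * @endcode
 */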

/* Number of meter flow actions, count and jump or count and drop. */
#define METER_ACTIONS 2

static void
__flow_dv_destroy_domain_def_policy(struct rte_eth_dev *dev,
				    enum mlx5_meter_domain domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_def_policy *def_policy =
			priv->sh->mtrmng->def_policy[domain];

	__flow_dv_destroy_sub_policy_rules(dev, &def_policy->sub_policy);
	mlx5_free(def_policy);
	priv->sh->mtrmng->def_policy[domain] = NULL;
}

/**
 * Destroy the default policy table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */
static void
flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int i;

	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++)
		if (priv->sh->mtrmng->def_policy[i])
			__flow_dv_destroy_domain_def_policy(dev,
					(enum mlx5_meter_domain)i);
	priv->sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
}

static int
__flow_dv_create_policy_flow(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			enum rte_color color, void *matcher_object,
			int actions_n, void *actions,
			bool match_src_port, const struct rte_flow_item *item,
			void **rule, const struct rte_flow_attr *attr)
{
	int ret;
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_flow_dv_match_params matcher = {
		.size = sizeof(matcher.buf),
	};
	struct mlx5_priv *priv = dev->data->dev_private;
	uint8_t misc_mask;

	if (match_src_port && (priv->representor || priv->master)) {
		if (flow_dv_translate_item_port_id(dev, matcher.buf,
						   value.buf, item, attr)) {
			DRV_LOG(ERR,
				"Failed to create meter policy flow with port.");
			return -1;
		}
	}
	flow_dv_match_meta_reg(matcher.buf, value.buf,
			       (enum modify_reg)color_reg_c_idx,
			       rte_col_2_mlx5_col(color), UINT32_MAX);
	misc_mask = flow_dv_matcher_enable(value.buf);
	__flow_dv_adjust_buf_size(&value.size, misc_mask);
	ret = mlx5_flow_os_create_flow(matcher_object,
				(void *)&value, actions_n, actions, rule);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policy flow.");
		return -1;
	}
	return 0;
}

static int
__flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			uint16_t priority,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			const struct rte_flow_attr *attr,
			bool match_src_port,
			const struct rte_flow_item *item,
			struct mlx5_flow_dv_matcher **policy_matcher,
			struct rte_flow_error *error)
{
	struct mlx5_list_entry *entry;
	struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
		.tbl = tbl_rsc,
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = &matcher,
	};
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;

	if (match_src_port && (priv->representor || priv->master)) {
		if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
						   value.buf, item, attr)) {
			DRV_LOG(ERR,
				"Failed to register meter policy matcher with port.");
			return -1;
		}
	}
	tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
	if (priority < RTE_COLOR_RED)
		flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
			(enum modify_reg)color_reg_c_idx, 0, color_mask);
	matcher.priority = priority;
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
				    matcher.mask.size);
	entry = mlx5_list_register(&tbl_data->matchers, &ctx);
	if (!entry) {
		DRV_LOG(ERR, "Failed to register meter policy matcher.");
		return -1;
	}
	*policy_matcher =
		container_of(entry, struct mlx5_flow_dv_matcher, entry);
	return 0;
}
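
/*
 * Note on the matcher above: policy rules classify packets by the meter
 * color written into a REG_C register, so the matcher masks only the low
 * MLX5_MTR_COLOR_BITS of that register; the red entry skips the color match
 * and acts as the lowest-priority, match-all case. A minimal sketch of the
 * comparison the matcher expresses (illustrative helper, not part of the
 * driver):
 *
 * @code
 * static inline bool
 * color_reg_matches(uint32_t reg_c_value, uint32_t mlx5_color)
 * {
 *	uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
 *
 *	return (reg_c_value & color_mask) == (mlx5_color & color_mask);
 * }
 * @endcode
 */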

/**
 * Create the policy rules per domain.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] sub_policy
 *   Pointer to sub policy table.
 * @param[in] egress
 *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[in] match_src_port
 *   Match the source port or not.
 * @param[in] acts
 *   Pointer to policy action list per color.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
__flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
		struct mlx5_flow_meter_sub_policy *sub_policy,
		uint8_t egress, uint8_t transfer, bool match_src_port,
		struct mlx5_meter_policy_acts acts[RTE_COLORS])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_error flow_err;
	uint32_t color_reg_c_idx;
	struct rte_flow_attr attr = {
		.group = MLX5_FLOW_TABLE_LEVEL_POLICY,
		.priority = 0,
		.ingress = 0,
		.egress = !!egress,
		.transfer = !!transfer,
		.reserved = 0,
	};
	int i;
	int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
	struct mlx5_sub_policy_color_rule *color_rule = NULL;

	if (ret < 0)
		return -1;
	/* Create policy table with POLICY level. */
	if (!sub_policy->tbl_rsc)
		sub_policy->tbl_rsc = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_POLICY,
				egress, transfer, false, NULL, 0, 0,
				sub_policy->idx, &flow_err);
	if (!sub_policy->tbl_rsc) {
		DRV_LOG(ERR, "Failed to create meter sub policy table.");
		return -1;
	}
	/* Prepare matchers. */
	color_reg_c_idx = ret;
	for (i = 0; i < RTE_COLORS; i++) {
		TAILQ_INIT(&sub_policy->color_rules[i]);
		if (i == RTE_COLOR_YELLOW || !acts[i].actions_n)
			continue;
		color_rule = mlx5_malloc(MLX5_MEM_ZERO,
				sizeof(struct mlx5_sub_policy_color_rule),
				0, SOCKET_ID_ANY);
		if (!color_rule) {
			DRV_LOG(ERR, "No memory to create color rule.");
			goto err_exit;
		}
		color_rule->src_port = priv->representor_id;
		attr.priority = i;
		/* Create matchers for color. */
		if (__flow_dv_create_policy_matcher(dev,
				color_reg_c_idx, i, sub_policy, &attr,
				(i != RTE_COLOR_RED ? match_src_port : false),
				NULL, &color_rule->matcher, &flow_err)) {
			DRV_LOG(ERR, "Failed to create color matcher.");
			goto err_exit;
		}
		/* Create flow, matching color. */
		if (__flow_dv_create_policy_flow(dev,
				color_reg_c_idx, (enum rte_color)i,
				color_rule->matcher->matcher_object,
				acts[i].actions_n, acts[i].dv_actions,
				(i != RTE_COLOR_RED ? match_src_port : false),
				NULL, &color_rule->rule, &attr)) {
			DRV_LOG(ERR, "Failed to create color rule.");
			goto err_exit;
		}
		TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
				  color_rule, next_port);
	}
	return 0;
err_exit:
	if (color_rule) {
		if (color_rule->rule)
			mlx5_flow_os_destroy_flow(color_rule->rule);
		if (color_rule->matcher) {
			struct mlx5_flow_tbl_data_entry *tbl =
				container_of(color_rule->matcher->tbl,
					     typeof(*tbl), tbl);
			mlx5_list_unregister(&tbl->matchers,
					     &color_rule->matcher->entry);
		}
		mlx5_free(color_rule);
	}
	return -1;
}

static int
__flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_policy *mtr_policy,
			struct mlx5_flow_meter_sub_policy *sub_policy,
			uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	struct mlx5_flow_dv_tag_resource *tag;
	struct mlx5_flow_dv_port_id_action_resource *port_action;
	struct mlx5_hrxq *hrxq;
	struct mlx5_flow_meter_info *next_fm = NULL;
	struct mlx5_flow_meter_policy *next_policy;
	struct mlx5_flow_meter_sub_policy *next_sub_policy;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct rte_flow_error error;
	uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	bool mtr_first = egress ||
			 (transfer && priv->representor_id != UINT16_MAX);
	bool match_src_port = false;
	int i;

	for (i = 0; i < RTE_COLORS; i++) {
		acts[i].actions_n = 0;
		if (i == RTE_COLOR_YELLOW)
			continue;
		if (i == RTE_COLOR_RED) {
			/* Only support drop on red. */
			acts[i].dv_actions[0] =
				mtr_policy->dr_drop_action[domain];
			acts[i].actions_n = 1;
			continue;
		}
		if (mtr_policy->act_cnt[i].fate_action == MLX5_FLOW_FATE_MTR) {
			struct rte_flow_attr attr = {
				.transfer = transfer
			};

			next_fm = mlx5_flow_meter_find(priv,
					mtr_policy->act_cnt[i].next_mtr_id,
					NULL);
			if (!next_fm) {
				DRV_LOG(ERR,
					"Failed to get next hierarchy meter.");
				goto err_exit;
			}
			if (mlx5_flow_meter_attach(priv, next_fm,
						   &attr, &error)) {
				DRV_LOG(ERR, "%s", error.message);
				next_fm = NULL;
				goto err_exit;
			}
			/* Meter action must be the first for TX. */
			if (mtr_first) {
				acts[i].dv_actions[acts[i].actions_n] =
					next_fm->meter_action;
				acts[i].actions_n++;
			}
		}
		if (mtr_policy->act_cnt[i].rix_mark) {
			tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG],
					mtr_policy->act_cnt[i].rix_mark);
			if (!tag) {
				DRV_LOG(ERR,
					"Failed to find mark action for policy.");
				goto err_exit;
			}
			acts[i].dv_actions[acts[i].actions_n] = tag->action;
			acts[i].actions_n++;
		}
		if (mtr_policy->act_cnt[i].modify_hdr) {
			acts[i].dv_actions[acts[i].actions_n] =
				mtr_policy->act_cnt[i].modify_hdr->action;
			acts[i].actions_n++;
		}
		if (mtr_policy->act_cnt[i].fate_action) {
			switch (mtr_policy->act_cnt[i].fate_action) {
			case MLX5_FLOW_FATE_PORT_ID:
				port_action = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
					mtr_policy->act_cnt[i].rix_port_id_action);
				if (!port_action) {
					DRV_LOG(ERR,
						"Failed to find port action for policy.");
					goto err_exit;
				}
				acts[i].dv_actions[acts[i].actions_n] =
					port_action->action;
				acts[i].actions_n++;
				mtr_policy->dev = dev;
				match_src_port = true;
				break;
			case MLX5_FLOW_FATE_DROP:
			case MLX5_FLOW_FATE_JUMP:
				acts[i].dv_actions[acts[i].actions_n] =
					mtr_policy->act_cnt[i].dr_jump_action[domain];
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_SHARED_RSS:
			case MLX5_FLOW_FATE_QUEUE:
				hrxq = mlx5_ipool_get
					(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					sub_policy->rix_hrxq[i]);
				if (!hrxq) {
					DRV_LOG(ERR,
						"Failed to find queue action for policy.");
					goto err_exit;
				}
				acts[i].dv_actions[acts[i].actions_n] =
					hrxq->action;
				acts[i].actions_n++;
				break;
			case MLX5_FLOW_FATE_MTR:
				if (!next_fm) {
					DRV_LOG(ERR,
						"No next hierarchy meter.");
					goto err_exit;
				}
				if (!mtr_first) {
					acts[i].dv_actions[acts[i].actions_n] =
						next_fm->meter_action;
					acts[i].actions_n++;
				}
				if (mtr_policy->act_cnt[i].next_sub_policy) {
					next_sub_policy =
						mtr_policy->act_cnt[i].next_sub_policy;
				} else {
					next_policy =
						mlx5_flow_meter_policy_find(dev,
							next_fm->policy_id, NULL);
					MLX5_ASSERT(next_policy);
					next_sub_policy =
						next_policy->sub_policys[domain][0];
				}
				tbl_data =
					container_of(next_sub_policy->tbl_rsc,
					struct mlx5_flow_tbl_data_entry, tbl);
				acts[i].dv_actions[acts[i].actions_n++] =
					tbl_data->jump.action;
				if (mtr_policy->act_cnt[i].modify_hdr)
					match_src_port = !!transfer;
				break;
			default:
				/* Queue action does nothing here. */
				break;
			}
		}
	}
	if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
				egress, transfer, match_src_port, acts)) {
		DRV_LOG(ERR, "Failed to create policy rules per domain.");
		goto err_exit;
	}
	return 0;
err_exit:
	if (next_fm)
		mlx5_flow_meter_detach(priv, next_fm);
	return -1;
}

/**
 * Create the policy rules.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in,out] mtr_policy
 *   Pointer to meter policy table.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
flow_dv_create_policy_rules(struct rte_eth_dev *dev,
			    struct mlx5_flow_meter_policy *mtr_policy)
{
	int i;
	uint16_t sub_policy_num;

	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
		sub_policy_num = (mtr_policy->sub_policy_num >>
			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) &
			MLX5_MTR_SUB_POLICY_NUM_MASK;
		if (!sub_policy_num)
			continue;
		/* Prepare actions list and create policy rules. */
		if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
				mtr_policy->sub_policys[i][0], i)) {
			DRV_LOG(ERR,
				"Failed to create policy action list per domain.");
			return -1;
		}
	}
	return 0;
}

static int
__flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
	struct mlx5_flow_meter_def_policy *def_policy;
	struct mlx5_flow_tbl_resource *jump_tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	uint8_t egress, transfer;
	struct rte_flow_error error;
	struct mlx5_meter_policy_acts acts[RTE_COLORS];
	int ret;

	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
	def_policy = mtrmng->def_policy[domain];
	if (!def_policy) {
		def_policy = mlx5_malloc(MLX5_MEM_ZERO,
				sizeof(struct mlx5_flow_meter_def_policy),
				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!def_policy) {
			DRV_LOG(ERR, "Failed to alloc default policy table.");
			/* Nothing allocated yet, no cleanup needed. */
			return -1;
		}
		mtrmng->def_policy[domain] = def_policy;
		/* Create the meter suffix table with SUFFIX level. */
		jump_tbl = flow_dv_tbl_resource_get(dev,
				MLX5_FLOW_TABLE_LEVEL_METER,
				egress, transfer, false, NULL, 0,
				0, MLX5_MTR_TABLE_ID_SUFFIX, &error);
		if (!jump_tbl) {
			DRV_LOG(ERR, "Failed to create meter suffix table.");
			goto def_policy_error;
		}
		def_policy->sub_policy.jump_tbl[RTE_COLOR_GREEN] = jump_tbl;
		tbl_data = container_of(jump_tbl,
					struct mlx5_flow_tbl_data_entry, tbl);
		def_policy->dr_jump_action[RTE_COLOR_GREEN] =
						tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_GREEN].actions_n = 1;
		/* Create jump action to the drop table. */
		if (!mtrmng->drop_tbl[domain]) {
			mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get
				(dev, MLX5_FLOW_TABLE_LEVEL_METER,
				 egress, transfer, false, NULL, 0,
				 0, MLX5_MTR_TABLE_ID_DROP, &error);
			if (!mtrmng->drop_tbl[domain]) {
				DRV_LOG(ERR, "Failed to create meter "
					"drop table for default policy.");
				goto def_policy_error;
			}
		}
		tbl_data = container_of(mtrmng->drop_tbl[domain],
					struct mlx5_flow_tbl_data_entry, tbl);
		def_policy->dr_jump_action[RTE_COLOR_RED] =
						tbl_data->jump.action;
		acts[RTE_COLOR_RED].dv_actions[0] = tbl_data->jump.action;
		acts[RTE_COLOR_RED].actions_n = 1;
		/* Create default policy rules. */
		ret = __flow_dv_create_domain_policy_rules(dev,
					&def_policy->sub_policy,
					egress, transfer, false, acts);
		if (ret) {
			DRV_LOG(ERR, "Failed to create default policy rules.");
			goto def_policy_error;
		}
	}
	return 0;
def_policy_error:
	__flow_dv_destroy_domain_def_policy(dev,
					    (enum mlx5_meter_domain)domain);
	return -1;
}

/**
 * Create the default policy table set.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
flow_dv_create_def_policy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int i;

	/* Non-termination policy table. */
	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
		if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
			continue;
		if (__flow_dv_create_domain_def_policy(dev, i)) {
			DRV_LOG(ERR, "Failed to create default policy.");
			return -1;
		}
	}
	return 0;
}

/**
 * Create the needed meter tables.
 * Lock free (mutex should be acquired by the caller).
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Meter information table.
 * @param[in] mtr_idx
 *   Meter index.
 * @param[in] domain_bitmap
 *   Domain bitmap.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
			struct mlx5_flow_meter_info *fm,
			uint32_t mtr_idx,
			uint8_t domain_bitmap)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_mtr_mng *mtrmng = priv->sh->mtrmng;
	struct rte_flow_error error;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	uint8_t egress, transfer;
	void *actions[METER_ACTIONS];
	int domain, ret, i;
	struct mlx5_flow_counter *cnt;
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_flow_dv_match_params matcher_para = {
		.size = sizeof(matcher_para.buf),
	};
	int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
						0, &error);
	uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
	struct mlx5_list_entry *entry;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
	};
	struct mlx5_flow_dv_matcher *drop_matcher;
	struct mlx5_flow_cb_ctx ctx = {
		.error = &error,
		.data = &matcher,
	};
	uint8_t misc_mask;

	if (!priv->mtr_en || mtr_id_reg_c < 0) {
		rte_errno = ENOTSUP;
		return -1;
	}
	for (domain = 0; domain < MLX5_MTR_DOMAIN_MAX; domain++) {
		if (!(domain_bitmap & (1 << domain)) ||
		    (mtrmng->def_rule[domain] && !fm->drop_cnt))
			continue;
		egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
		transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
		/* Create the drop table with METER DROP level. */
		if (!mtrmng->drop_tbl[domain]) {
			mtrmng->drop_tbl[domain] = flow_dv_tbl_resource_get(dev,
					MLX5_FLOW_TABLE_LEVEL_METER,
					egress, transfer, false, NULL, 0,
					0, MLX5_MTR_TABLE_ID_DROP, &error);
			if (!mtrmng->drop_tbl[domain]) {
				DRV_LOG(ERR, "Failed to create meter drop table.");
				goto policy_error;
			}
		}
		/* Create default matcher in drop table. */
		matcher.tbl = mtrmng->drop_tbl[domain];
		tbl_data = container_of(mtrmng->drop_tbl[domain],
					struct mlx5_flow_tbl_data_entry, tbl);
		if (!mtrmng->def_matcher[domain]) {
			flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
					       (enum modify_reg)mtr_id_reg_c,
					       0, 0);
			matcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;
			matcher.crc = rte_raw_cksum
					((const void *)matcher.mask.buf,
					 matcher.mask.size);
			entry = mlx5_list_register(&tbl_data->matchers, &ctx);
			if (!entry) {
				DRV_LOG(ERR, "Failed to register meter "
					"drop default matcher.");
				goto policy_error;
			}
			mtrmng->def_matcher[domain] = container_of(entry,
					struct mlx5_flow_dv_matcher, entry);
		}
		/* Create default rule in drop table. */
		if (!mtrmng->def_rule[domain]) {
			i = 0;
			actions[i++] = priv->sh->dr_drop_action;
			flow_dv_match_meta_reg(matcher_para.buf, value.buf,
					(enum modify_reg)mtr_id_reg_c, 0, 0);
			misc_mask = flow_dv_matcher_enable(value.buf);
			__flow_dv_adjust_buf_size(&value.size, misc_mask);
			ret = mlx5_flow_os_create_flow
				(mtrmng->def_matcher[domain]->matcher_object,
				 (void *)&value, i, actions,
				 &mtrmng->def_rule[domain]);
			if (ret) {
				DRV_LOG(ERR, "Failed to create meter "
					"default drop rule for drop table.");
				goto policy_error;
			}
		}
		if (!fm->drop_cnt)
			continue;
		MLX5_ASSERT(mtrmng->max_mtr_bits);
		if (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {
			/* Create matchers for Drop. */
			flow_dv_match_meta_reg(matcher.mask.buf, value.buf,
					(enum modify_reg)mtr_id_reg_c, 0,
					(mtr_id_mask << mtr_id_offset));
			matcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;
			matcher.crc = rte_raw_cksum
					((const void *)matcher.mask.buf,
					 matcher.mask.size);
			entry = mlx5_list_register(&tbl_data->matchers, &ctx);
			if (!entry) {
				DRV_LOG(ERR,
					"Failed to register meter drop matcher.");
				goto policy_error;
			}
			mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1] =
				container_of(entry, struct mlx5_flow_dv_matcher,
					     entry);
		}
		drop_matcher =
			mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];
		/* Create drop rule, matching meter_id only. */
16350                 flow_dv_match_meta_reg(matcher_para.buf, value.buf,
16351                                 (enum modify_reg)mtr_id_reg_c,
16352                                 (mtr_idx << mtr_id_offset), UINT32_MAX);
16353                 i = 0;
16354                 cnt = flow_dv_counter_get_by_idx(dev,
16355                                         fm->drop_cnt, NULL);
16356                 actions[i++] = cnt->action;
16357                 actions[i++] = priv->sh->dr_drop_action;
16358                 misc_mask = flow_dv_matcher_enable(value.buf);
16359                 __flow_dv_adjust_buf_size(&value.size, misc_mask);
16360                 ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
16361                                                (void *)&value, i, actions,
16362                                                &fm->drop_rule[domain]);
16363                 if (ret) {
16364                         DRV_LOG(ERR, "Failed to create meter "
16365                                 "drop rule for drop table.");
16366                         goto policy_error;
16367                 }
16368         }
16369         return 0;
16370 policy_error:
16371         for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
16372                 if (fm->drop_rule[i]) {
16373                         claim_zero(mlx5_flow_os_destroy_flow
16374                                 (fm->drop_rule[i]));
16375                         fm->drop_rule[i] = NULL;
16376                 }
16377         }
16378         return -1;
16379 }
16380
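/*
 * Editor's sketch (illustrative, not driver code): how the drop rules above
 * encode the meter index in the meter register.  When the register is shared
 * (priv->mtr_reg_share), the low MLX5_MTR_COLOR_BITS carry the packet color,
 * so the meter ID is shifted above them.  Assuming max_mtr_bits = 8:
 *
 *   uint32_t mtr_id_mask = (UINT32_C(1) << 8) - 1;      // 0xff
 *   uint8_t mtr_id_offset = MLX5_MTR_COLOR_BITS;        // register share mode
 *
 *   // Default matcher/rule: mask 0, value 0 - catch-all, lowest priority.
 *   // Per-meter drop matcher: mask  = mtr_id_mask << mtr_id_offset
 *   // Per-meter drop rule:    value = mtr_idx << mtr_id_offset
 */
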
16381 static struct mlx5_flow_meter_sub_policy *
16382 __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
16383                 struct mlx5_flow_meter_policy *mtr_policy,
16384                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS],
16385                 struct mlx5_flow_meter_sub_policy *next_sub_policy,
16386                 bool *is_reuse)
16387 {
16388         struct mlx5_priv *priv = dev->data->dev_private;
16389         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16390         uint32_t sub_policy_idx = 0;
16391         uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
16392         uint32_t i, j;
16393         struct mlx5_hrxq *hrxq;
16394         struct mlx5_flow_handle dh;
16395         struct mlx5_meter_policy_action_container *act_cnt;
16396         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16397         uint16_t sub_policy_num;
16398
16399         rte_spinlock_lock(&mtr_policy->sl);
16400         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16401                 if (!rss_desc[i])
16402                         continue;
16403                 hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
16404                 if (!hrxq_idx[i]) {
16405                         rte_spinlock_unlock(&mtr_policy->sl);
16406                         return NULL;
16407                 }
16408         }
16409         sub_policy_num = (mtr_policy->sub_policy_num >>
16410                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16411                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16412         for (i = 0; i < sub_policy_num; i++) {
16414                 for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
16415                         if (rss_desc[j] &&
16416                             hrxq_idx[j] !=
16417                             mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
16418                                 break;
16419                 }
16420                 if (j >= MLX5_MTR_RTE_COLORS) {
16421                         /*
16422                          * Found a sub-policy table with
16423                          * the same queues per color.
16424                          */
16425                         rte_spinlock_unlock(&mtr_policy->sl);
16426                         for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
16427                                 mlx5_hrxq_release(dev, hrxq_idx[j]);
16428                         *is_reuse = true;
16429                         return mtr_policy->sub_policys[domain][i];
16430                 }
16431         }
16432         /* Create sub policy. */
16433         if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
16434                 /* Reuse the first dummy sub_policy. */
16435                 sub_policy = mtr_policy->sub_policys[domain][0];
16436                 sub_policy_idx = sub_policy->idx;
16437         } else {
16438                 sub_policy = mlx5_ipool_zmalloc
16439                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16440                                 &sub_policy_idx);
16441                 if (!sub_policy ||
16442                         sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
16443                         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
16444                                 mlx5_hrxq_release(dev, hrxq_idx[i]);
16445                         goto rss_sub_policy_error;
16446                 }
16447                 sub_policy->idx = sub_policy_idx;
16448                 sub_policy->main_policy = mtr_policy;
16449         }
16450         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16451                 if (!rss_desc[i])
16452                         continue;
16453                 sub_policy->rix_hrxq[i] = hrxq_idx[i];
16454                 if (mtr_policy->is_hierarchy) {
16455                         act_cnt = &mtr_policy->act_cnt[i];
16456                         act_cnt->next_sub_policy = next_sub_policy;
16457                         mlx5_hrxq_release(dev, hrxq_idx[i]);
16458                 } else {
16459                         /*
16460                          * Overwrite the last action from
16461                          * RSS action to Queue action.
16462                          */
16463                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
16464                                 hrxq_idx[i]);
16465                         if (!hrxq) {
16466                                 DRV_LOG(ERR, "Failed to create policy hrxq");
16467                                 goto rss_sub_policy_error;
16468                         }
16469                         act_cnt = &mtr_policy->act_cnt[i];
16470                         if (act_cnt->rix_mark || act_cnt->modify_hdr) {
16471                                 memset(&dh, 0, sizeof(struct mlx5_flow_handle));
16472                                 if (act_cnt->rix_mark)
16473                                         dh.mark = 1;
16474                                 dh.fate_action = MLX5_FLOW_FATE_QUEUE;
16475                                 dh.rix_hrxq = hrxq_idx[i];
16476                                 flow_drv_rxq_flags_set(dev, &dh);
16477                         }
16478                 }
16479         }
16480         if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
16481                 sub_policy, domain)) {
16482                 DRV_LOG(ERR, "Failed to create policy "
16483                         "rules per domain.");
16484                 goto rss_sub_policy_error;
16485         }
16486         if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16487                 i = (mtr_policy->sub_policy_num >>
16488                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16489                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16490                 mtr_policy->sub_policys[domain][i] = sub_policy;
16491                 i++;
16492                 if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
16493                         goto rss_sub_policy_error;
16494                 mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16495                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16496                 mtr_policy->sub_policy_num |=
16497                         (i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16498                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16499         }
16500         rte_spinlock_unlock(&mtr_policy->sl);
16501         *is_reuse = false;
16502         return sub_policy;
16503 rss_sub_policy_error:
16504         if (sub_policy) {
16505                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16506                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16507                         i = (mtr_policy->sub_policy_num >>
16508                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16509                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16510                         mtr_policy->sub_policys[domain][i] = NULL;
16511                         mlx5_ipool_free
16512                         (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16513                                         sub_policy->idx);
16514                 }
16515         }
16516         rte_spinlock_unlock(&mtr_policy->sl);
16517         return NULL;
16518 }
16519
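/*
 * Editor's sketch (illustrative): mtr_policy->sub_policy_num packs one small
 * per-domain sub-policy count into a single word, which is why the function
 * above shifts by MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain everywhere.
 * Assuming a 3-bit field per domain (shift = 3, mask = 0x7):
 *
 *   n = (sub_policy_num >> (3 * domain)) & 0x7;         // read the count
 *   sub_policy_num &= ~(0x7u << (3 * domain));          // clear the field
 *   sub_policy_num |= ((n + 1) & 0x7u) << (3 * domain); // store count + 1
 */
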
16520 /**
16521  * Find the policy table for prefix table with RSS.
16522  *
16523  * @param[in] dev
16524  *   Pointer to Ethernet device.
16525  * @param[in] mtr_policy
16526  *   Pointer to meter policy table.
16527  * @param[in] rss_desc
16528  *   Pointer to the RSS descriptor array, one entry per color.
16529  * @return
16530  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
16531  */
16532 static struct mlx5_flow_meter_sub_policy *
16533 flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
16534                 struct mlx5_flow_meter_policy *mtr_policy,
16535                 struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
16536 {
16537         struct mlx5_priv *priv = dev->data->dev_private;
16538         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16539         struct mlx5_flow_meter_info *next_fm;
16540         struct mlx5_flow_meter_policy *next_policy;
16541         struct mlx5_flow_meter_sub_policy *next_sub_policy = NULL;
16542         struct mlx5_flow_meter_policy *policies[MLX5_MTR_CHAIN_MAX_NUM];
16543         struct mlx5_flow_meter_sub_policy *sub_policies[MLX5_MTR_CHAIN_MAX_NUM];
16544         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16545         bool reuse_sub_policy;
16546         uint32_t i = 0;
16547         uint32_t j = 0;
16548
16549         while (true) {
16550                 /* Iterate hierarchy to get all policies in this hierarchy. */
16551                 policies[i++] = mtr_policy;
16552                 if (!mtr_policy->is_hierarchy)
16553                         break;
16554                 if (i >= MLX5_MTR_CHAIN_MAX_NUM) {
16555                         DRV_LOG(ERR, "Exceed max meter number in hierarchy.");
16556                         return NULL;
16557                 }
16558                 next_fm = mlx5_flow_meter_find(priv,
16559                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16560                 if (!next_fm) {
16561                         DRV_LOG(ERR, "Failed to get next meter in hierarchy.");
16562                         return NULL;
16563                 }
16564                 next_policy =
16565                         mlx5_flow_meter_policy_find(dev, next_fm->policy_id,
16566                                                     NULL);
16567                 MLX5_ASSERT(next_policy);
16568                 mtr_policy = next_policy;
16569         }
16570         while (i) {
16571                 /*
16572                  * From last policy to the first one in hierarchy,
16573                  * create/get the sub policy for each of them.
16574                  */
16575                 sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
16576                                                         policies[--i],
16577                                                         rss_desc,
16578                                                         next_sub_policy,
16579                                                         &reuse_sub_policy);
16580                 if (!sub_policy) {
16581                         DRV_LOG(ERR, "Failed to get the sub policy.");
16582                         goto err_exit;
16583                 }
16584                 if (!reuse_sub_policy)
16585                         sub_policies[j++] = sub_policy;
16586                 next_sub_policy = sub_policy;
16587         }
16588         return sub_policy;
16589 err_exit:
16590         while (j) {
16591                 uint16_t sub_policy_num;
16592
16593                 sub_policy = sub_policies[--j];
16594                 mtr_policy = sub_policy->main_policy;
16595                 __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
16596                 if (sub_policy != mtr_policy->sub_policys[domain][0]) {
16597                         sub_policy_num = (mtr_policy->sub_policy_num >>
16598                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16599                                 MLX5_MTR_SUB_POLICY_NUM_MASK;
16600                         mtr_policy->sub_policys[domain][sub_policy_num - 1] =
16601                                                                         NULL;
16602                         sub_policy_num--;
16603                         mtr_policy->sub_policy_num &=
16604                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16605                                   (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16606                         mtr_policy->sub_policy_num |=
16607                         (sub_policy_num & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16608                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16609                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16610                                         sub_policy->idx);
16611                 }
16612         }
16613         return NULL;
16614 }
16615
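/*
 * Editor's walk-through (assumed three-level hierarchy A -> B -> C): the
 * first loop above records policies[] = {A, B, C}; the second loop then
 * creates or reuses sub-policies in reverse order (C, B, A) so that each
 * level's green action can reference the next level's existing sub-policy:
 *
 *   next_sub_policy = NULL;
 *   while (i)
 *           next_sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
 *                                   policies[--i], rss_desc,
 *                                   next_sub_policy, &reuse_sub_policy);
 */
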
16616 /**
16617  * Create the sub policy tag rule for all meters in hierarchy.
16618  *
16619  * @param[in] dev
16620  *   Pointer to Ethernet device.
16621  * @param[in] fm
16622  *   Meter information table.
16623  * @param[in] src_port
16624  *   The src port this extra rule should use.
16625  * @param[in] item
16626  *   The src port match item.
16627  * @param[out] error
16628  *   Perform verbose error reporting if not NULL.
16629  * @return
16630  *   0 on success, a negative errno value otherwise and rte_errno is set.
16631  */
16632 static int
16633 flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
16634                                 struct mlx5_flow_meter_info *fm,
16635                                 int32_t src_port,
16636                                 const struct rte_flow_item *item,
16637                                 struct rte_flow_error *error)
16638 {
16639         struct mlx5_priv *priv = dev->data->dev_private;
16640         struct mlx5_flow_meter_policy *mtr_policy;
16641         struct mlx5_flow_meter_sub_policy *sub_policy;
16642         struct mlx5_flow_meter_info *next_fm = NULL;
16643         struct mlx5_flow_meter_policy *next_policy;
16644         struct mlx5_flow_meter_sub_policy *next_sub_policy;
16645         struct mlx5_flow_tbl_data_entry *tbl_data;
16646         struct mlx5_sub_policy_color_rule *color_rule = NULL;
16647         struct mlx5_meter_policy_acts acts;
16648         uint32_t color_reg_c_idx;
16649         bool mtr_first = (src_port != UINT16_MAX);
16650         struct rte_flow_attr attr = {
16651                 .group = MLX5_FLOW_TABLE_LEVEL_POLICY,
16652                 .priority = 0,
16653                 .ingress = 0,
16654                 .egress = 0,
16655                 .transfer = 1,
16656                 .reserved = 0,
16657         };
16658         uint32_t domain = MLX5_MTR_DOMAIN_TRANSFER;
16659         int i;
16660
16661         mtr_policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
16662         MLX5_ASSERT(mtr_policy);
16663         if (!mtr_policy->is_hierarchy)
16664                 return 0;
16665         next_fm = mlx5_flow_meter_find(priv,
16666                         mtr_policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id, NULL);
16667         if (!next_fm) {
16668                 return rte_flow_error_set(error, EINVAL,
16669                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
16670                                 "Failed to find next meter in hierarchy.");
16671         }
16672         if (!next_fm->drop_cnt)
16673                 goto exit;
16674         color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, error);
16675         sub_policy = mtr_policy->sub_policys[domain][0];
16676         for (i = 0; i < RTE_COLORS; i++) {
16677                 bool rule_exist = false;
16678                 struct mlx5_meter_policy_action_container *act_cnt;
16679
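                /* Only the green color rule is created in a hierarchy now. */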
16680                 if (i >= RTE_COLOR_YELLOW)
16681                         break;
16682                 TAILQ_FOREACH(color_rule,
16683                               &sub_policy->color_rules[i], next_port)
16684                         if (color_rule->src_port == src_port) {
16685                                 rule_exist = true;
16686                                 break;
16687                         }
16688                 if (rule_exist)
16689                         continue;
16690                 color_rule = mlx5_malloc(MLX5_MEM_ZERO,
16691                                 sizeof(struct mlx5_sub_policy_color_rule),
16692                                 0, SOCKET_ID_ANY);
16693                 if (!color_rule)
16694                         return rte_flow_error_set(error, ENOMEM,
16695                                 RTE_FLOW_ERROR_TYPE_ACTION,
16696                                 NULL, "No memory to create tag color rule.");
16697                 color_rule->src_port = src_port;
16698                 attr.priority = i;
16699                 next_policy = mlx5_flow_meter_policy_find(dev,
16700                                                 next_fm->policy_id, NULL);
16701                 MLX5_ASSERT(next_policy);
16702                 next_sub_policy = next_policy->sub_policys[domain][0];
16703                 tbl_data = container_of(next_sub_policy->tbl_rsc,
16704                                         struct mlx5_flow_tbl_data_entry, tbl);
16705                 act_cnt = &mtr_policy->act_cnt[i];
16706                 if (mtr_first) {
16707                         acts.dv_actions[0] = next_fm->meter_action;
16708                         acts.dv_actions[1] = act_cnt->modify_hdr->action;
16709                 } else {
16710                         acts.dv_actions[0] = act_cnt->modify_hdr->action;
16711                         acts.dv_actions[1] = next_fm->meter_action;
16712                 }
16713                 acts.dv_actions[2] = tbl_data->jump.action;
16714                 acts.actions_n = 3;
16715                 if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
16716                         next_fm = NULL;
16717                         goto err_exit;
16718                 }
16719                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
16720                                         i, sub_policy, &attr, true, item,
16721                                         &color_rule->matcher, error)) {
16722                         rte_flow_error_set(error, errno,
16723                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16724                                 "Failed to create hierarchy meter matcher.");
16725                         goto err_exit;
16726                 }
16727                 if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
16728                                         (enum rte_color)i,
16729                                         color_rule->matcher->matcher_object,
16730                                         acts.actions_n, acts.dv_actions,
16731                                         true, item,
16732                                         &color_rule->rule, &attr)) {
16733                         rte_flow_error_set(error, errno,
16734                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16735                                 "Failed to create hierarchy meter rule.");
16736                         goto err_exit;
16737                 }
16738                 TAILQ_INSERT_TAIL(&sub_policy->color_rules[i],
16739                                   color_rule, next_port);
16740         }
16741 exit:
16742         /*
16743          * Recursive call to iterate all meters in hierarchy and
16744          * create needed rules.
16745          */
16746         return flow_dv_meter_hierarchy_rule_create(dev, next_fm,
16747                                                 src_port, item, error);
16748 err_exit:
16749         if (color_rule) {
16750                 if (color_rule->rule)
16751                         mlx5_flow_os_destroy_flow(color_rule->rule);
16752                 if (color_rule->matcher) {
16753                         struct mlx5_flow_tbl_data_entry *tbl =
16754                                 container_of(color_rule->matcher->tbl,
16755                                                 typeof(*tbl), tbl);
16756                         mlx5_list_unregister(&tbl->matchers,
16757                                                 &color_rule->matcher->entry);
16758                 }
16759                 mlx5_free(color_rule);
16760         }
16761         if (next_fm)
16762                 mlx5_flow_meter_detach(priv, next_fm);
16763         return -rte_errno;
16764 }
16765
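/*
 * Editor's note (illustrative): for each color rule created above, the DV
 * action array is ordered according to mtr_first:
 *
 *   mtr_first:  { next meter, tag modify header, jump to next policy table }
 *   otherwise:  { tag modify header, next meter, jump to next policy table }
 *
 * The tail call then recurses with next_fm so that every meter level in the
 * hierarchy gets its own tag rule for this src_port.
 */
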
16766 /**
16767  * Destroy the sub policy table with RX queue.
16768  *
16769  * @param[in] dev
16770  *   Pointer to Ethernet device.
16771  * @param[in] mtr_policy
16772  *   Pointer to meter policy table.
16773  */
16774 static void
16775 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
16776                 struct mlx5_flow_meter_policy *mtr_policy)
16777 {
16778         struct mlx5_priv *priv = dev->data->dev_private;
16779         struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
16780         uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
16781         uint32_t i, j;
16782         uint16_t sub_policy_num, new_policy_num;
16783
16784         rte_spinlock_lock(&mtr_policy->sl);
16785         for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
16786                 switch (mtr_policy->act_cnt[i].fate_action) {
16787                 case MLX5_FLOW_FATE_SHARED_RSS:
16788                         sub_policy_num = (mtr_policy->sub_policy_num >>
16789                         (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
16790                         MLX5_MTR_SUB_POLICY_NUM_MASK;
16791                         new_policy_num = sub_policy_num;
16792                         for (j = 0; j < sub_policy_num; j++) {
16793                                 sub_policy =
16794                                         mtr_policy->sub_policys[domain][j];
16795                                 if (sub_policy) {
16796                                         __flow_dv_destroy_sub_policy_rules(dev,
16797                                                 sub_policy);
16798                                         if (sub_policy !=
16799                                             mtr_policy->sub_policys[domain][0]) {
16800                                                 mtr_policy->sub_policys[domain][j] =
16801                                                                         NULL;
16802                                                 mlx5_ipool_free
16803                                                 (priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
16804                                                  sub_policy->idx);
16805                                                 new_policy_num--;
16806                                         }
16807                                 }
16808                         }
16809                         if (new_policy_num != sub_policy_num) {
16810                                 mtr_policy->sub_policy_num &=
16811                                 ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
16812                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
16813                                 mtr_policy->sub_policy_num |=
16814                                 (new_policy_num &
16815                                         MLX5_MTR_SUB_POLICY_NUM_MASK) <<
16816                                 (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
16817                         }
16818                         break;
16819                 case MLX5_FLOW_FATE_QUEUE:
16820                         sub_policy = mtr_policy->sub_policys[domain][0];
16821                         __flow_dv_destroy_sub_policy_rules(dev,
16822                                                 sub_policy);
16823                         break;
16824                 default:
16825                         /* Other actions have no queue; nothing to do. */
16826                         break;
16827                 }
16828         }
16829         rte_spinlock_unlock(&mtr_policy->sl);
16830 }
16831
16832 /**
16833  * Validate the batch counter support in root table.
16834  *
16835  * Create a simple flow with invalid counter and drop action on root table to
16836  * validate if batch counter with offset on root table is supported or not.
16837  *
16838  * @param[in] dev
16839  *   Pointer to rte_eth_dev structure.
16840  *
16841  * @return
16842  *   0 on success, a negative errno value otherwise and rte_errno is set.
16843  */
16844 int
16845 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
16846 {
16847         struct mlx5_priv *priv = dev->data->dev_private;
16848         struct mlx5_dev_ctx_shared *sh = priv->sh;
16849         struct mlx5_flow_dv_match_params mask = {
16850                 .size = sizeof(mask.buf),
16851         };
16852         struct mlx5_flow_dv_match_params value = {
16853                 .size = sizeof(value.buf),
16854         };
16855         struct mlx5dv_flow_matcher_attr dv_attr = {
16856                 .type = IBV_FLOW_ATTR_NORMAL | IBV_FLOW_ATTR_FLAGS_EGRESS,
16857                 .priority = 0,
16858                 .match_criteria_enable = 0,
16859                 .match_mask = (void *)&mask,
16860         };
16861         void *actions[2] = { 0 };
16862         struct mlx5_flow_tbl_resource *tbl = NULL;
16863         struct mlx5_devx_obj *dcs = NULL;
16864         void *matcher = NULL;
16865         void *flow = NULL;
16866         int ret = -1;
16867
16868         tbl = flow_dv_tbl_resource_get(dev, 0, 1, 0, false, NULL,
16869                                         0, 0, 0, NULL);
16870         if (!tbl)
16871                 goto err;
16872         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
16873         if (!dcs)
16874                 goto err;
16875         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
16876                                                     &actions[0]);
16877         if (ret)
16878                 goto err;
16879         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
16880         __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
16881         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
16882                                                &matcher);
16883         if (ret)
16884                 goto err;
16885         __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
16886         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
16887                                        actions, &flow);
16888 err:
16889         /*
16890          * If a batch counter with offset is not supported, the driver does
16891          * not validate the invalid offset value and flow creation succeeds.
16892          * In that case, batch counters are not supported in the root table.
16893          *
16894          * Otherwise, if flow creation fails, counter offset is supported.
16895          */
16896         if (flow) {
16897                 DRV_LOG(INFO, "Batch counter is not supported in root "
16898                               "table. Switch to fallback mode.");
16899                 rte_errno = ENOTSUP;
16900                 ret = -rte_errno;
16901                 claim_zero(mlx5_flow_os_destroy_flow(flow));
16902         } else {
16903                 /* Check matcher to make sure validate fail at flow create. */
16904                 if (!matcher || errno != EINVAL)
16905                         DRV_LOG(ERR, "Unexpected error in counter offset "
16906                                      "support detection");
16907                 ret = 0;
16908         }
16909         if (actions[0])
16910                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
16911         if (matcher)
16912                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
16913         if (tbl)
16914                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
16915         if (dcs)
16916                 claim_zero(mlx5_devx_cmd_destroy(dcs));
16917         return ret;
16918 }
16919
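/*
 * Editor's usage sketch (assumed call site, not verbatim driver code): the
 * probe above is intended to run once at device start-up; a failure selects
 * the per-counter fallback allocation mode.
 *
 *   if (mlx5_flow_dv_discover_counter_offset_support(dev) < 0) {
 *           // rte_errno == ENOTSUP: batch counters with offset are not
 *           // usable on the root table; fall back to one DevX counter
 *           // object per flow counter.
 *           sh->cmng.counter_fallback = 1;
 *   }
 */
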
16920 /**
16921  * Query a devx counter.
16922  *
16923  * @param[in] dev
16924  *   Pointer to the Ethernet device structure.
16925  * @param[in] counter
16926  *   Index of the flow counter.
16927  * @param[in] clear
16928  *   Set to clear the counter statistics.
16929  * @param[out] pkts
16930  *   The statistics value of packets.
16931  * @param[out] bytes
16932  *   The statistics value of bytes.
16933  *
16934  * @return
16935  *   0 on success, -1 otherwise.
16936  */
16937 static int
16938 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
16939                       uint64_t *pkts, uint64_t *bytes)
16940 {
16941         struct mlx5_priv *priv = dev->data->dev_private;
16942         struct mlx5_flow_counter *cnt;
16943         uint64_t inn_pkts, inn_bytes;
16944         int ret;
16945
16946         if (!priv->config.devx)
16947                 return -1;
16948
16949         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
16950         if (ret)
16951                 return -1;
16952         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
16953         *pkts = inn_pkts - cnt->hits;
16954         *bytes = inn_bytes - cnt->bytes;
16955         if (clear) {
16956                 cnt->hits = inn_pkts;
16957                 cnt->bytes = inn_bytes;
16958         }
16959         return 0;
16960 }
16961
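/*
 * Editor's sketch of the clear-on-read semantics above: the DevX query
 * returns absolute hit/byte totals, and the PMD reports deltas against the
 * baseline saved by the last clearing query.
 *
 *   query(clear = false): *pkts = hw_pkts - cnt->hits;  // running delta
 *   query(clear = true):  *pkts = hw_pkts - cnt->hits;
 *                         cnt->hits = hw_pkts;          // reset baseline
 */
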
16962 /**
16963  * Get aged-out flows.
16964  *
16965  * @param[in] dev
16966  *   Pointer to the Ethernet device structure.
16967  * @param[in] context
16968  *   The address of an array of pointers to the aged-out flows contexts.
16969  * @param[in] nb_contexts
16970  *   The length of context array pointers.
16971  * @param[out] error
16972  *   Perform verbose error reporting if not NULL. Initialized in case of
16973  *   error only.
16974  *
16975  * @return
16976  *   The number of aged-out contexts reported on success, otherwise a
16977  *   negative errno value.
16978  *   If nb_contexts is 0, return the total number of aged-out contexts.
16979  *   If nb_contexts is not 0, return the number of aged-out flows
16980  *   reported in the context array.
16981  */
16982 static int
16983 flow_get_aged_flows(struct rte_eth_dev *dev,
16984                     void **context,
16985                     uint32_t nb_contexts,
16986                     struct rte_flow_error *error)
16987 {
16988         struct mlx5_priv *priv = dev->data->dev_private;
16989         struct mlx5_age_info *age_info;
16990         struct mlx5_age_param *age_param;
16991         struct mlx5_flow_counter *counter;
16992         struct mlx5_aso_age_action *act;
16993         int nb_flows = 0;
16994
16995         if (nb_contexts && !context)
16996                 return rte_flow_error_set(error, EINVAL,
16997                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16998                                           NULL, "empty context");
16999         age_info = GET_PORT_AGE_INFO(priv);
17000         rte_spinlock_lock(&age_info->aged_sl);
17001         LIST_FOREACH(act, &age_info->aged_aso, next) {
17002                 nb_flows++;
17003                 if (nb_contexts) {
17004                         context[nb_flows - 1] =
17005                                                 act->age_params.context;
17006                         if (!(--nb_contexts))
17007                                 break;
17008                 }
17009         }
17010         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
17011                 nb_flows++;
17012                 if (nb_contexts) {
17013                         age_param = MLX5_CNT_TO_AGE(counter);
17014                         context[nb_flows - 1] = age_param->context;
17015                         if (!(--nb_contexts))
17016                                 break;
17017                 }
17018         }
17019         rte_spinlock_unlock(&age_info->aged_sl);
17020         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
17021         return nb_flows;
17022 }
17023
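/*
 * Editor's application-side sketch (handle_aged_flow() is a hypothetical
 * helper): this handler is reached through the generic
 * rte_flow_get_aged_flows() API.
 *
 *   void *contexts[64];
 *   struct rte_flow_error err;
 *   int n = rte_flow_get_aged_flows(port_id, contexts,
 *                                   RTE_DIM(contexts), &err);
 *   for (int k = 0; k < n; k++)
 *           handle_aged_flow(contexts[k]);
 */
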
17024 /*
17025  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
17026  */
17027 static uint32_t
17028 flow_dv_counter_allocate(struct rte_eth_dev *dev)
17029 {
17030         return flow_dv_counter_alloc(dev, 0);
17031 }
17032
17033 /**
17034  * Validate indirect action.
17035  * Dispatcher for action type specific validation.
17036  *
17037  * @param[in] dev
17038  *   Pointer to the Ethernet device structure.
17039  * @param[in] conf
17040  *   Indirect action configuration.
17041  * @param[in] action
17042  *   The indirect action object to validate.
17043  * @param[out] error
17044  *   Perform verbose error reporting if not NULL. Initialized in case of
17045  *   error only.
17046  *
17047  * @return
17048  *   0 on success, otherwise negative errno value.
17049  */
17050 static int
17051 flow_dv_action_validate(struct rte_eth_dev *dev,
17052                         const struct rte_flow_indir_action_conf *conf,
17053                         const struct rte_flow_action *action,
17054                         struct rte_flow_error *err)
17055 {
17056         struct mlx5_priv *priv = dev->data->dev_private;
17057
17058         RTE_SET_USED(conf);
17059         switch (action->type) {
17060         case RTE_FLOW_ACTION_TYPE_RSS:
17061                 /*
17062                  * priv->obj_ops is set according to driver capabilities.
17063                  * When DevX capabilities are sufficient, it is set to
17064                  * devx_obj_ops; otherwise, it is set to ibv_obj_ops.
17065                  * ibv_obj_ops doesn't support the ind_table_modify
17066                  * operation, so in that case the indirect RSS action
17067                  * can't be used.
17068                  */
17069                 if (priv->obj_ops.ind_table_modify == NULL)
17070                         return rte_flow_error_set
17071                                         (err, ENOTSUP,
17072                                          RTE_FLOW_ERROR_TYPE_ACTION,
17073                                          NULL,
17074                                          "Indirect RSS action not supported");
17075                 return mlx5_validate_action_rss(dev, action, err);
17076         case RTE_FLOW_ACTION_TYPE_AGE:
17077                 if (!priv->sh->aso_age_mng)
17078                         return rte_flow_error_set(err, ENOTSUP,
17079                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17080                                                 NULL,
17081                                                 "Indirect age action not supported");
17082                 return flow_dv_validate_action_age(0, action, dev, err);
17083         case RTE_FLOW_ACTION_TYPE_COUNT:
17084                 /*
17085                  * There are two mechanisms to share the action count.
17086                  * The old mechanism uses the shared field to share, while the
17087                  * new mechanism uses the indirect action API.
17088                  * This validation comes to make sure that the two mechanisms
17089                  * are not combined.
17090                  */
17091                 if (is_shared_action_count(action))
17092                         return rte_flow_error_set(err, ENOTSUP,
17093                                                   RTE_FLOW_ERROR_TYPE_ACTION,
17094                                                   NULL,
17095                                                   "Mixing shared and indirect counters is not supported");
17096                 return flow_dv_validate_action_count(dev, true, 0, err);
17097         case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17098                 if (!priv->sh->ct_aso_en)
17099                         return rte_flow_error_set(err, ENOTSUP,
17100                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17101                                         "ASO CT is not supported");
17102                 return mlx5_validate_action_ct(dev, action->conf, err);
17103         default:
17104                 return rte_flow_error_set(err, ENOTSUP,
17105                                           RTE_FLOW_ERROR_TYPE_ACTION,
17106                                           NULL,
17107                                           "action type not supported");
17108         }
17109 }
17110
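/*
 * Editor's application-side sketch: this dispatcher runs when an indirect
 * action is created through the generic API, e.g. an indirect counter:
 *
 *   struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *   struct rte_flow_action_count cnt_conf = { 0 };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *           .conf = &cnt_conf,
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow_action_handle *handle =
 *           rte_flow_action_handle_create(port_id, &conf, &action, &err);
 */
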
17111 /**
17112  * Validate the meter hierarchy chain for meter policy.
17113  *
17114  * @param[in] dev
17115  *   Pointer to the Ethernet device structure.
17116  * @param[in] meter_id
17117  *   Meter id.
17118  * @param[in] action_flags
17119  *   Holds the actions detected until now.
17120  * @param[out] is_rss
17121  *   Is RSS or not.
17122  * @param[out] hierarchy_domain
17123  *   The domain bitmap for hierarchy policy.
17124  * @param[out] error
17125  *   Perform verbose error reporting if not NULL. Initialized in case of
17126  *   error only.
17127  *
17128  * @return
17129  *   0 on success, otherwise negative errno value with error set.
17130  */
17131 static int
17132 flow_dv_validate_policy_mtr_hierarchy(struct rte_eth_dev *dev,
17133                                   uint32_t meter_id,
17134                                   uint64_t action_flags,
17135                                   bool *is_rss,
17136                                   uint8_t *hierarchy_domain,
17137                                   struct rte_mtr_error *error)
17138 {
17139         struct mlx5_priv *priv = dev->data->dev_private;
17140         struct mlx5_flow_meter_info *fm;
17141         struct mlx5_flow_meter_policy *policy;
17142         uint8_t cnt = 1;
17143
17144         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
17145                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
17146                 return -rte_mtr_error_set(error, EINVAL,
17147                                         RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
17148                                         NULL,
17149                                         "Multiple fate actions not supported.");
17150         while (true) {
17151                 fm = mlx5_flow_meter_find(priv, meter_id, NULL);
17152                 if (!fm)
17153                         return -rte_mtr_error_set(error, EINVAL,
17154                                                 RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17155                                         "Meter not found in meter hierarchy.");
17156                 if (fm->def_policy)
17157                         return -rte_mtr_error_set(error, EINVAL,
17158                                         RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
17159                         "Non-termination meter not supported in hierarchy.");
17160                 policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
17161                 MLX5_ASSERT(policy);
17162                 if (!policy->is_hierarchy) {
17163                         if (policy->transfer)
17164                                 *hierarchy_domain |=
17165                                                 MLX5_MTR_DOMAIN_TRANSFER_BIT;
17166                         if (policy->ingress)
17167                                 *hierarchy_domain |=
17168                                                 MLX5_MTR_DOMAIN_INGRESS_BIT;
17169                         if (policy->egress)
17170                                 *hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
17171                         *is_rss = policy->is_rss;
17172                         break;
17173                 }
17174                 meter_id = policy->act_cnt[RTE_COLOR_GREEN].next_mtr_id;
17175                 if (++cnt >= MLX5_MTR_CHAIN_MAX_NUM)
17176                         return -rte_mtr_error_set(error, EINVAL,
17177                                         RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
17178                                         "Exceed max hierarchy meter number.");
17179         }
17180         return 0;
17181 }
17182
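/*
 * Editor's walk-through (assumed chain): the loop above follows the green
 * action's meter chain until a terminating (non-hierarchy) policy is found,
 * taking the RSS flag and domain bits from that final policy, and fails once
 * MLX5_MTR_CHAIN_MAX_NUM levels are exceeded:
 *
 *   meter M0 -> policy P0 (hierarchy, green -> M1)
 *   meter M1 -> policy P1 (hierarchy, green -> M2)
 *   meter M2 -> policy P2 (terminating) => *is_rss/domains come from P2
 */
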
17183 /**
17184  * Validate meter policy actions.
17185  * Dispatcher for action type specific validation.
17186  *
17187  * @param[in] dev
17188  *   Pointer to the Ethernet device structure.
17189  * @param[in] action
17190  *   The meter policy action object to validate.
17191  * @param[in] attr
17192  *   Attributes of flow to determine steering domain.
17193  * @param[out] error
17194  *   Perform verbose error reporting if not NULL. Initialized in case of
17195  *   error only.
17196  *
17197  * @return
17198  *   0 on success, otherwise negative errno value.
17199  */
17200 static int
17201 flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
17202                         const struct rte_flow_action *actions[RTE_COLORS],
17203                         struct rte_flow_attr *attr,
17204                         bool *is_rss,
17205                         uint8_t *domain_bitmap,
17206                         bool *is_def_policy,
17207                         struct rte_mtr_error *error)
17208 {
17209         struct mlx5_priv *priv = dev->data->dev_private;
17210         struct mlx5_dev_config *dev_conf = &priv->config;
17211         const struct rte_flow_action *act;
17212         uint64_t action_flags = 0;
17213         int actions_n;
17214         int i, ret;
17215         struct rte_flow_error flow_err;
17216         uint8_t domain_color[RTE_COLORS] = {0};
17217         uint8_t def_domain = MLX5_MTR_ALL_DOMAIN_BIT;
17218         uint8_t hierarchy_domain = 0;
17219         const struct rte_flow_action_meter *mtr;
17220
17221         if (!priv->config.dv_esw_en)
17222                 def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
17223         *domain_bitmap = def_domain;
17224         if (actions[RTE_COLOR_YELLOW] &&
17225                 actions[RTE_COLOR_YELLOW]->type != RTE_FLOW_ACTION_TYPE_END)
17226                 return -rte_mtr_error_set(error, ENOTSUP,
17227                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17228                                 NULL,
17229                                 "Yellow color does not support any action.");
17230         if (actions[RTE_COLOR_RED] &&
17231                 actions[RTE_COLOR_RED]->type != RTE_FLOW_ACTION_TYPE_DROP)
17232                 return -rte_mtr_error_set(error, ENOTSUP,
17233                                 RTE_MTR_ERROR_TYPE_METER_POLICY,
17234                                 NULL, "Red color only supports drop action.");
17235         /*
17236          * Check default policy actions:
17237          * Green/Yellow: no action, Red: drop action
17238          */
17239         if (!actions[RTE_COLOR_GREEN] ||
17240             actions[RTE_COLOR_GREEN]->type == RTE_FLOW_ACTION_TYPE_END) {
17241                 *is_def_policy = true;
17242                 return 0;
17243         }
17244         flow_err.message = NULL;
17245         for (i = 0; i < RTE_COLORS; i++) {
17246                 act = actions[i];
17247                 for (action_flags = 0, actions_n = 0;
17248                         act && act->type != RTE_FLOW_ACTION_TYPE_END;
17249                         act++) {
17250                         if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
17251                                 return -rte_mtr_error_set(error, ENOTSUP,
17252                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17253                                           NULL, "too many actions");
17254                         switch (act->type) {
17255                         case RTE_FLOW_ACTION_TYPE_PORT_ID:
17256                                 if (!priv->config.dv_esw_en)
17257                                         return -rte_mtr_error_set(error,
17258                                         ENOTSUP,
17259                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17260                                         NULL, "PORT action validation"
17261                                         " fails when E-Switch is disabled");
17262                                 ret = flow_dv_validate_action_port_id(dev,
17263                                                 action_flags,
17264                                                 act, attr, &flow_err);
17265                                 if (ret)
17266                                         return -rte_mtr_error_set(error,
17267                                         ENOTSUP,
17268                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17269                                         NULL, flow_err.message ?
17270                                         flow_err.message :
17271                                         "PORT action validate check fail");
17272                                 ++actions_n;
17273                                 action_flags |= MLX5_FLOW_ACTION_PORT_ID;
17274                                 break;
17275                         case RTE_FLOW_ACTION_TYPE_MARK:
17276                                 ret = flow_dv_validate_action_mark(dev, act,
17277                                                            action_flags,
17278                                                            attr, &flow_err);
17279                                 if (ret < 0)
17280                                         return -rte_mtr_error_set(error,
17281                                         ENOTSUP,
17282                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17283                                         NULL, flow_err.message ?
17284                                         flow_err.message :
17285                                         "Mark action validate check fail");
17286                                 if (dev_conf->dv_xmeta_en !=
17287                                         MLX5_XMETA_MODE_LEGACY)
17288                                         return -rte_mtr_error_set(error,
17289                                         ENOTSUP,
17290                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17291                                         NULL, "Extend MARK action is "
17292                                         "not supported. Please try use "
17293                                         "default policy for meter.");
17294                                 action_flags |= MLX5_FLOW_ACTION_MARK;
17295                                 ++actions_n;
17296                                 break;
17297                         case RTE_FLOW_ACTION_TYPE_SET_TAG:
17298                                 ret = flow_dv_validate_action_set_tag(dev,
17299                                                         act, action_flags,
17300                                                         attr, &flow_err);
17301                                 if (ret)
17302                                         return -rte_mtr_error_set(error,
17303                                         ENOTSUP,
17304                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17305                                         NULL, flow_err.message ?
17306                                         flow_err.message :
17307                                         "Set tag action validate check fail");
17308                                 /*
17309                                  * Count all modify-header actions
17310                                  * as one action.
17311                                  */
17312                                 if (!(action_flags &
17313                                         MLX5_FLOW_MODIFY_HDR_ACTIONS))
17314                                         ++actions_n;
17315                                 action_flags |= MLX5_FLOW_ACTION_SET_TAG;
17316                                 break;
17317                         case RTE_FLOW_ACTION_TYPE_DROP:
17318                                 ret = mlx5_flow_validate_action_drop
17319                                         (action_flags,
17320                                         attr, &flow_err);
17321                                 if (ret < 0)
17322                                         return -rte_mtr_error_set(error,
17323                                         ENOTSUP,
17324                                         RTE_MTR_ERROR_TYPE_METER_POLICY,
17325                                         NULL, flow_err.message ?
17326                                         flow_err.message :
17327                                         "Drop action validate check fail");
17328                                 action_flags |= MLX5_FLOW_ACTION_DROP;
17329                                 ++actions_n;
17330                                 break;
17331                         case RTE_FLOW_ACTION_TYPE_QUEUE:
17332                                 /*
17333                                  * Check whether extensive
17334                                  * metadata feature is engaged.
17335                                  */
17336                                 if (dev_conf->dv_flow_en &&
17337                                         (dev_conf->dv_xmeta_en !=
17338                                         MLX5_XMETA_MODE_LEGACY) &&
17339                                         mlx5_flow_ext_mreg_supported(dev))
17340                                         return -rte_mtr_error_set(error,
17341                                           ENOTSUP,
17342                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17343                                           NULL, "Queue action with meta "
17344                                           "is not supported. Please try use "
17345                                           "default policy for meter.");
17346                                 ret = mlx5_flow_validate_action_queue(act,
17347                                                         action_flags, dev,
17348                                                         attr, &flow_err);
17349                                 if (ret < 0)
17350                                         return -rte_mtr_error_set(error,
17351                                           ENOTSUP,
17352                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17353                                           NULL, flow_err.message ?
17354                                           flow_err.message :
17355                                           "Queue action validate check fail");
17356                                 action_flags |= MLX5_FLOW_ACTION_QUEUE;
17357                                 ++actions_n;
17358                                 break;
17359                         case RTE_FLOW_ACTION_TYPE_RSS:
17360                                 if (dev_conf->dv_flow_en &&
17361                                         (dev_conf->dv_xmeta_en !=
17362                                         MLX5_XMETA_MODE_LEGACY) &&
17363                                         mlx5_flow_ext_mreg_supported(dev))
17364                                         return -rte_mtr_error_set(error,
17365                                           ENOTSUP,
17366                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17367                                           NULL, "RSS action with meta "
17368                                           "is not supported. Please try use "
17369                                           "default policy for meter.");
17370                                 ret = mlx5_validate_action_rss(dev, act,
17371                                                 &flow_err);
17372                                 if (ret < 0)
17373                                         return -rte_mtr_error_set(error,
17374                                           ENOTSUP,
17375                                           RTE_MTR_ERROR_TYPE_METER_POLICY,
17376                                           NULL, flow_err.message ?
17377                                           flow_err.message :
17378                                           "RSS action validate check fail");
17379                                 action_flags |= MLX5_FLOW_ACTION_RSS;
17380                                 ++actions_n;
17381                                 *is_rss = true;
17382                                 break;
                        case RTE_FLOW_ACTION_TYPE_JUMP:
                                ret = flow_dv_validate_action_jump(dev,
                                        NULL, act, action_flags,
                                        attr, true, &flow_err);
                                if (ret)
                                        return -rte_mtr_error_set(error,
                                          ENOTSUP,
                                          RTE_MTR_ERROR_TYPE_METER_POLICY,
                                          NULL, flow_err.message ?
                                          flow_err.message :
                                          "Jump action validation failed");
                                ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_JUMP;
                                break;
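                        /*
                         * A meter action inside a policy forms a meter
                         * hierarchy; only the GREEN color may chain to
                         * another meter, which is checked below.
                         */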
                        case RTE_FLOW_ACTION_TYPE_METER:
                                if (i != RTE_COLOR_GREEN)
                                        return -rte_mtr_error_set(error,
                                                ENOTSUP,
                                                RTE_MTR_ERROR_TYPE_METER_POLICY,
                                                NULL, flow_err.message ?
                                                flow_err.message :
                                  "Meter hierarchy only supports GREEN color.");
                                mtr = act->conf;
                                ret = flow_dv_validate_policy_mtr_hierarchy(dev,
                                                        mtr->mtr_id,
                                                        action_flags,
                                                        is_rss,
                                                        &hierarchy_domain,
                                                        error);
                                if (ret)
                                        return ret;
                                ++actions_n;
                                action_flags |=
                                MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
                                break;
                        default:
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL,
                                        "Unsupported optional action");
                        }
                }
                /* Yellow is not supported, just skip. */
                if (i == RTE_COLOR_YELLOW)
                        continue;
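                /*
                 * Derive the steering domain for this color from its
                 * actions: PORT_ID implies the transfer domain, while
                 * QUEUE, RSS and MARK are valid only for ingress.
                 */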
                if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                        domain_color[i] = MLX5_MTR_DOMAIN_TRANSFER_BIT;
                else if ((action_flags &
                        (MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_QUEUE)) ||
                        (action_flags & MLX5_FLOW_ACTION_MARK))
                        /*
                         * Only MLX5_XMETA_MODE_LEGACY is supported here,
                         * so the MARK action is valid only in the
                         * ingress domain.
                         */
                        domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
                else if (action_flags &
                        MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
                        domain_color[i] = hierarchy_domain;
                else
                        domain_color[i] = def_domain;
                /*
                 * Validate the mutual exclusion of the drop action with
                 * other actions. The drop action is mutually exclusive
                 * with any other action, except for the count action.
                 */
                if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
                        (action_flags & ~MLX5_FLOW_ACTION_DROP)) {
                        return -rte_mtr_error_set(error, ENOTSUP,
                                RTE_MTR_ERROR_TYPE_METER_POLICY,
                                NULL, "Drop action is mutually exclusive "
                                "with any other action");
                }
                /* The E-Switch has a few restrictions on usable actions. */
                if (domain_color[i] & MLX5_MTR_DOMAIN_TRANSFER_BIT) {
                        if (!mlx5_flow_ext_mreg_supported(dev) &&
                                action_flags & MLX5_FLOW_ACTION_MARK)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action MARK");
                        if (action_flags & MLX5_FLOW_ACTION_QUEUE)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action QUEUE");
                        if (action_flags & MLX5_FLOW_ACTION_RSS)
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "unsupported action RSS");
                        if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
                                return -rte_mtr_error_set(error, ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "no fate action is found");
                } else {
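                        /*
                         * A policy without a fate action cannot be applied
                         * in the ingress domain: restrict it to egress if
                         * possible, otherwise fail.
                         */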
                        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) &&
                                (domain_color[i] &
                                MLX5_MTR_DOMAIN_INGRESS_BIT)) {
                                if ((domain_color[i] &
                                        MLX5_MTR_DOMAIN_EGRESS_BIT))
                                        domain_color[i] =
                                        MLX5_MTR_DOMAIN_EGRESS_BIT;
                                else
                                        return -rte_mtr_error_set(error,
                                        ENOTSUP,
                                        RTE_MTR_ERROR_TYPE_METER_POLICY,
                                        NULL, "no fate action is found");
                        }
                }
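                /*
                 * Record the effective domain bitmap when this color's
                 * actions narrowed it from the default.
                 */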
                if (domain_color[i] != def_domain)
                        *domain_bitmap = domain_color[i];
        }
        return 0;
}

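/**
 * Synchronize the selected steering domains with the hardware.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] domains
 *   Bitmap of MLX5_DOMAIN_BIT_* values selecting the NIC Rx, NIC Tx
 *   and FDB domains to synchronize.
 * @param[in] flags
 *   Flags passed through to mlx5_os_flow_dr_sync_domain().
 *
 * @return
 *   0 on success, a nonzero error value otherwise.
 */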
static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret = 0;

        if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
                                                flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
                ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
                if (ret != 0)
                        return ret;
        }
        return 0;
}

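/* Driver callbacks of the DV flow engine, registered with the generic
 * mlx5 flow layer.
 */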
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
        .query = flow_dv_query,
        .create_mtr_tbls = flow_dv_create_mtr_tbls,
        .destroy_mtr_tbls = flow_dv_destroy_mtr_tbls,
        .destroy_mtr_drop_tbls = flow_dv_destroy_mtr_drop_tbls,
        .create_meter = flow_dv_mtr_alloc,
        .free_meter = flow_dv_aso_mtr_release_to_pool,
        .validate_mtr_acts = flow_dv_validate_mtr_policy_acts,
        .create_mtr_acts = flow_dv_create_mtr_policy_acts,
        .destroy_mtr_acts = flow_dv_destroy_mtr_policy_acts,
        .create_policy_rules = flow_dv_create_policy_rules,
        .destroy_policy_rules = flow_dv_destroy_policy_rules,
        .create_def_policy = flow_dv_create_def_policy,
        .destroy_def_policy = flow_dv_destroy_def_policy,
        .meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
        .meter_hierarchy_rule_create = flow_dv_meter_hierarchy_rule_create,
        .destroy_sub_policy_with_rxq = flow_dv_destroy_sub_policy_with_rxq,
        .counter_alloc = flow_dv_counter_allocate,
        .counter_free = flow_dv_counter_free,
        .counter_query = flow_dv_counter_query,
        .get_aged_flows = flow_get_aged_flows,
        .action_validate = flow_dv_action_validate,
        .action_create = flow_dv_action_create,
        .action_destroy = flow_dv_action_destroy,
        .action_update = flow_dv_action_update,
        .action_query = flow_dv_action_query,
        .sync_domain = flow_dv_sync_domain,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */